Mirror of https://github.com/containers/skopeo.git (synced 2026-01-30 22:08:44 +00:00)
Compare commits
35 Commits
404c5bd341
2134209960
1e8c029562
932b037d66
26a48586a0
683f4263ef
ebfa1e936b
509782e78b
776b408f76
fee5981ebf
d9e9604979
3606380bdb
640b967463
b8b9913695
9e2720dfcc
b329dd0d4e
1b10352591
bba2874451
0322441640
8868d2ebe4
f19acc1c90
47f24b4097
c2597aab22
47065938da
790620024e
42b01df89e
aafae2bc50
e5b9ea5ee6
1c2ff140cb
f7c608e65e
ec810c91fe
17bea86e89
3e0026d907
3e98377bf2
0658bc80f3
.gitignore (vendored): 2 lines changed

@@ -1,3 +1,3 @@
-/docs/skopeo.1
+*.1
 /layers-*
 /skopeo
.travis.yml

@@ -1,3 +1,4 @@
 language: go

+matrix:
+  include:
@@ -21,4 +22,4 @@ install:

 script:
 - if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then hack/travis_osx.sh ; fi
-- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make check ; fi
+- if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then make vendor && ./hack/tree_status.sh && make check ; fi
CONTRIBUTING.md

@@ -115,6 +115,35 @@ Use your real name (sorry, no pseudonyms or anonymous contributions.)
 If you set your `user.name` and `user.email` git configs, you can sign your
 commit automatically with `git commit -s`.

+### Dependencies management
+
+Make sure [`vndr`](https://github.com/LK4D4/vndr) is installed.
+
+In order to add a new dependency to this project:
+
+- add a new line to `vendor.conf` according to `vndr` rules (e.g. `github.com/pkg/errors master`)
+- run `make vendor`
+
+In order to update an existing dependency:
+
+- update the relevant dependency line in `vendor.conf`
+- run `make vendor`
+
+When new PRs for [containers/image](https://github.com/containers/image) break `skopeo` (i.e. `containers/image` tests fail in `make test-skopeo`):
+
+- create a new branch in your `skopeo` checkout and switch to it
+- update `vendor.conf`: find the `containers/image` dependency and update it to vendor from your own branch in your own repository fork (e.g. `github.com/containers/image my-branch https://github.com/runcom/image`)
+- run `make vendor`
+- make any other necessary changes in the skopeo repo (e.g. add other dependencies now required by `containers/image`, or update skopeo for the changed `containers/image` API)
+- optionally add new integration tests to the skopeo repo
+- submit the resulting branch as a skopeo PR, marked “DO NOT MERGE”
+- iterate until tests pass and the PR is reviewed
+- then the original `containers/image` PR can be merged, disregarding its `make test-skopeo` failure
+- as soon as possible after that, in the skopeo PR, restore the `containers/image` line in `vendor.conf` to use `containers/image:master`
+- run `make vendor`
+- update the skopeo PR with the result and drop the “DO NOT MERGE” marking
+- after tests complete successfully again, merge the skopeo PR
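Condensed, the add-a-dependency workflow from the added section is just an edit plus a make target; a minimal sketch, reusing the example pin from the text:

```sh
# Hedged sketch of the add-a-dependency workflow described above.
echo 'github.com/pkg/errors master' >> vendor.conf   # pin per vndr rules
make vendor                                          # repopulate vendor/ from vendor.conf
```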
 ## Communications

 For general questions or discussions, please use the
Makefile: 26 lines changed

@@ -1,4 +1,4 @@
-.PHONY: all binary build-container docs build-local clean install install-binary install-completions shell test-integration vendor
+.PHONY: all binary build-container docs docs-in-container build-local clean install install-binary install-completions shell test-integration .install.vndr vendor

 export GO15VENDOREXPERIMENT=1

@@ -22,9 +22,9 @@ CONTAINERSSYSCONFIGDIR=${DESTDIR}/etc/containers
 REGISTRIESDDIR=${CONTAINERSSYSCONFIGDIR}/registries.d
 SIGSTOREDIR=${DESTDIR}/var/lib/atomic/sigstore
 BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
-GO_MD2MAN ?= go-md2man
 GO ?= go
 CONTAINER_RUNTIME := $(shell command -v podman 2> /dev/null || echo docker)
+GOMD2MAN ?= $(shell command -v go-md2man || echo '$(GOBIN)/go-md2man')

 ifeq ($(DEBUG), 1)
 override GOGCFLAGS += -N -l
@@ -51,9 +51,10 @@ GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)

 MANPAGES_MD = $(wildcard docs/*.md)

-BTRFS_BUILD_TAG = $(shell hack/btrfs_tag.sh)
+BTRFS_BUILD_TAG = $(shell hack/btrfs_tag.sh) $(shell hack/btrfs_installed_tag.sh)
 LIBDM_BUILD_TAG = $(shell hack/libdm_tag.sh)
-LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG) $(DARWIN_BUILD_TAG)
+OSTREE_BUILD_TAG = $(shell hack/ostree_tag.sh)
+LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG) $(OSTREE_BUILD_TAG) $(DARWIN_BUILD_TAG)
 BUILDTAGS += $(LOCAL_BUILD_TAGS)

 ifeq ($(DISABLE_CGO), 1)
@@ -64,7 +65,7 @@ endif
 # Note: Uses the -N -l go compiler options to disable compiler optimizations
 # and inlining. Using these build options allows you to subsequently
 # use source debugging tools like delve.
-all: binary docs
+all: binary docs-in-container

 # Build a container image (skopeobuild) that has everything we need to build.
 # Then do the build and the output (skopeo) should appear in current dir
@@ -89,10 +90,15 @@ build-container:
 	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -t "$(IMAGE)" .

 docs/%.1: docs/%.1.md
-	$(GO_MD2MAN) -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@
+	@sed -e 's/\((skopeo.*\.md)\)//' -e 's/\[\(skopeo.*\)\]/\1/' $< | $(GOMD2MAN) -in /dev/stdin -out $@

 docs: $(MANPAGES_MD:%.md=%)

+docs-in-container:
+	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
+	${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
+		skopeobuildimage make docs $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
+
 clean:
 	rm -f skopeo docs/*.1

@@ -140,5 +146,9 @@ validate-local:
 test-unit-local:
 	$(GPGME_ENV) $(GO) test -tags "$(BUILDTAGS)" $$($(GO) list -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/containers/skopeo/\(integration\|vendor/.*\)$$')

-vendor: vendor.conf
-	vndr -whitelist '^github.com/containers/image/docs/.*'
+.install.vndr:
+	$(GO) get -u github.com/LK4D4/vndr
+
+vendor: vendor.conf .install.vndr
+	$(GOPATH)/bin/vndr \
+		-whitelist '^github.com/containers/image/docs/.*'
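Two of the Makefile changes are easy to exercise from a shell: the new sed filter strips markdown link syntax from man-page cross-references before go-md2man converts them, and docs-in-container wraps the docs build in a build image. A small sketch (the sample input line is an assumption):

```sh
# What the man-page sed filter does to a markdown cross-reference:
echo '| [skopeo-copy(1)](skopeo-copy.1.md) | Copy an image. |' \
    | sed -e 's/\((skopeo.*\.md)\)//' -e 's/\[\(skopeo.*\)\]/\1/'
# -> | skopeo-copy(1) | Copy an image. |

# Build the man pages without a local go-md2man install:
make docs-in-container
```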
README.md: 29 lines changed

@@ -229,34 +229,7 @@ NOT TODO
 CONTRIBUTING
 ------------

-### Dependencies management
-
-Make sure [`vndr`](https://github.com/LK4D4/vndr) is installed.
-
-In order to add a new dependency to this project:
-
-- add a new line to `vendor.conf` according to `vndr` rules (e.g. `github.com/pkg/errors master`)
-- run `make vendor`
-
-In order to update an existing dependency:
-
-- update the relevant dependency line in `vendor.conf`
-- run `make vendor`
-
-When new PRs for [containers/image](https://github.com/containers/image) break `skopeo` (i.e. `containers/image` tests fail in `make test-skopeo`):
-
-- create a new branch in your `skopeo` checkout and switch to it
-- update `vendor.conf`: find the `containers/image` dependency and update it to vendor from your own branch in your own repository fork (e.g. `github.com/containers/image my-branch https://github.com/runcom/image`)
-- run `make vendor`
-- make any other necessary changes in the skopeo repo (e.g. add other dependencies now required by `containers/image`, or update skopeo for the changed `containers/image` API)
-- optionally add new integration tests to the skopeo repo
-- submit the resulting branch as a skopeo PR, marked “DO NOT MERGE”
-- iterate until tests pass and the PR is reviewed
-- then the original `containers/image` PR can be merged, disregarding its `make test-skopeo` failure
-- as soon as possible after that, in the skopeo PR, restore the `containers/image` line in `vendor.conf` to use `containers/image:master`
-- run `make vendor`
-- update the skopeo PR with the result and drop the “DO NOT MERGE” marking
-- after tests complete successfully again, merge the skopeo PR
+Please read the [contribution guide](CONTRIBUTING.md) if you want to collaborate in the project.

 License
 -------
cmd/skopeo/copy.go

@@ -23,6 +23,8 @@ type copyOptions struct {
 	removeSignatures  bool           // Do not copy signatures from the source image
 	signByFingerprint string         // Sign the image using a GPG key with the specified fingerprint
 	format            optionalString // Force conversion of the image to a specified format
+	quiet             bool           // Suppress output information when copying images
+
 }

 func copyCmd(global *globalOptions) cli.Command {
@@ -55,6 +57,11 @@ func copyCmd(global *globalOptions) cli.Command {
 			Usage: "additional tags (supports docker-archive)",
 			Value: &opts.additionalTags, // Surprisingly StringSliceFlag does not support Destination:, but modifies Value: in place.
 		},
+		cli.BoolFlag{
+			Name:        "quiet, q",
+			Usage:       "Suppress output information when copying images",
+			Destination: &opts.quiet,
+		},
 		cli.BoolFlag{
 			Name:  "remove-signatures",
 			Usage: "Do not copy signatures from SOURCE-IMAGE",
@@ -132,6 +139,9 @@ func (opts *copyOptions) run(args []string, stdout io.Writer) error {
 	ctx, cancel := opts.global.commandTimeoutContext()
 	defer cancel()

+	if opts.quiet {
+		stdout = nil
+	}
 	_, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
 		RemoveSignatures: opts.removeSignatures,
 		SignBy:           opts.signByFingerprint,
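From the command line, the new flag looks like this; a hedged example (destination directory assumed):

```sh
# Copy without progress/report output on stdout:
skopeo copy --quiet docker://busybox:latest dir:/tmp/busybox
```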
Bash completion script

@@ -5,20 +5,37 @@
 _complete_() {
 	local options_with_args=$1
 	local boolean_options="$2 -h --help"
+	local transports=$3

-	case "$prev" in
-		$options_with_args)
-			return
-			;;
-	esac
+	local option_with_args
+	for option_with_args in $options_with_args $transports
+	do
+		if [ "$option_with_args" == "$prev" -o "$option_with_args" == "$cur" ]
+		then
+			return
+		fi
+	done

 	case "$cur" in
 		-*)
 			COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
 			;;
+		*)
+			if [ -n "$transports" ]
+			then
+				compopt -o nospace
+				COMPREPLY=( $( compgen -W "$transports" -- "$cur" ) )
+			fi
+			;;
 	esac
 }

+_skopeo_supported_transports() {
+	local subcommand=$1
+
+	${PROG} $subcommand --help | grep "Supported transports" -A 1 | tail -n 1 | sed -e 's/,/:/g' -e 's/$/:/'
+}
+
 _skopeo_copy() {
 	local options_with_args="
 	--authfile
@@ -40,7 +57,11 @@ _skopeo_copy() {
 	--remove-signatures
 	"

-	_complete_ "$options_with_args" "$boolean_options"
+	local transports="
+	$(_skopeo_supported_transports $(echo $FUNCNAME | sed 's/_skopeo_//'))
+	"
+
+	_complete_ "$options_with_args" "$boolean_options" "$transports"
 }

 _skopeo_inspect() {
@@ -53,7 +74,12 @@ _skopeo_inspect() {
 	--raw
 	--tls-verify
 	"
-	_complete_ "$options_with_args" "$boolean_options"
+
+	local transports="
+	$(_skopeo_supported_transports $(echo $FUNCNAME | sed 's/_skopeo_//'))
+	"
+
+	_complete_ "$options_with_args" "$boolean_options" "$transports"
 }

 _skopeo_standalone_sign() {
@@ -90,7 +116,12 @@ _skopeo_delete() {
 	local boolean_options="
 	--tls-verify
 	"
-	_complete_ "$options_with_args" "$boolean_options"
+
+	local transports="
+	$(_skopeo_supported_transports $(echo $FUNCNAME | sed 's/_skopeo_//'))
+	"
+
+	_complete_ "$options_with_args" "$boolean_options" "$transports"
 }

 _skopeo_layers() {
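The transports helper just reshapes the "Supported transports" line of a subcommand's --help output into completion words. An illustrative run of its sed stage (the sample transport list is an assumption):

```sh
echo 'containers-storage, dir, docker, docker-archive, docker-daemon, oci, ostree' \
    | sed -e 's/,/:/g' -e 's/$/:/'
# -> containers-storage: dir: docker: docker-archive: docker-daemon: oci: ostree:
```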
docs/skopeo-copy.1.md (new file, 79 lines)

% skopeo-copy(1)

## NAME
skopeo\-copy - Copy an image (manifest, filesystem layers, signatures) from one location to another.

## SYNOPSIS
**skopeo copy** [**--sign-by=**_key-ID_] _source-image destination-image_

## DESCRIPTION
Copy an image (manifest, filesystem layers, signatures) from one location to another.

Uses the system's trust policy to validate images; rejects images not trusted by the policy.

_source-image_ uses the "image name" format described above

_destination-image_ uses the "image name" format described above

## OPTIONS

**--authfile** _path_

Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--format, -f** _manifest-type_ Manifest type (oci, v2s1, or v2s2) to use when saving an image to a directory using the 'dir:' transport (default is the manifest type of the source)

**--quiet, -q** suppress output information when copying images

**--remove-signatures** do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.

**--sign-by=**_key-id_ add a signature using that key ID for an image name corresponding to _destination-image_

**--src-creds** _username[:password]_ for accessing the source registry

**--dest-compress** _bool-value_ Compress tarball image layers when saving to a directory using the 'dir' transport. (default is the same compression type as the source)

**--dest-creds** _username[:password]_ for accessing the destination registry

**--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon

**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to the container source registry or daemon (defaults to true)

**--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon

**--dest-ostree-tmp-dir** _path_ Directory to use for OSTree temporary files.

**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to the container destination registry or daemon (defaults to true)

**--src-daemon-host** _host_ Copy from the docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).

**--dest-daemon-host** _host_ Copy to the docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).

Existing signatures, if any, are preserved as well.

## EXAMPLES

To copy the layers of the docker.io busybox image to a local directory:
```sh
$ mkdir -p /var/lib/images/busybox
$ skopeo copy docker://busybox:latest dir:/var/lib/images/busybox
$ ls /var/lib/images/busybox/*
/var/lib/images/busybox/2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749.tar
/var/lib/images/busybox/manifest.json
/var/lib/images/busybox/8ddc19f16526912237dd8af81971d5e4dd0587907234be2b83e249518d5b673f.tar
```

To copy and sign an image:

```sh
$ skopeo copy --sign-by dev@example.com atomic:example/busybox:streaming atomic:example/busybox:gold
```

## SEE ALSO
skopeo(1), podman-login(1), docker-login(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
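The per-side credential flags combine naturally when both ends of a copy need authentication; a hedged sketch (registry hosts and credentials are placeholders):

```sh
skopeo copy --src-creds user:pass --dest-creds user2:pass2 \
    docker://registry.example.com/busybox:latest \
    docker://registry2.example.com/busybox:latest
```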
docs/skopeo-delete.1.md (new file, 50 lines)

% skopeo-delete(1)

## NAME
skopeo\-delete - Mark _image-name_ for deletion.

## SYNOPSIS
**skopeo delete** _image-name_

Mark _image-name_ for deletion. To release the allocated disk space, you must log in to the container registry server and run the container registry garbage collector. E.g.,

```
/usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml

Note: sometimes the config.yml is stored in /etc/docker/registry/config.yml

If you are running the container registry inside of a container you would execute something like:

$ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml

```

**--authfile** _path_

Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--creds** _username[:password]_ for accessing the registry

**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry

**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)

Additionally, the registry must allow deletions by setting `REGISTRY_STORAGE_DELETE_ENABLED=true` for the registry daemon.

## EXAMPLES

Mark the image example/pause for deletion from the registry.example.com registry:
```sh
$ skopeo delete --force docker://registry.example.com/example/pause:latest
```
See above for additional details on using the command **delete**.

## SEE ALSO
skopeo(1), podman-login(1), docker-login(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
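For local testing, a registry that accepts deletions can be started with the environment variable mentioned above; a hedged sketch (image name, port, and repository are assumptions):

```sh
# Registry that permits DELETE requests:
docker run -d -p 5000:5000 -e REGISTRY_STORAGE_DELETE_ENABLED=true --name registry registry:2
skopeo delete --tls-verify=false docker://localhost:5000/example/pause:latest
```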
docs/skopeo-inspect.1.md (new file, 61 lines)

% skopeo-inspect(1)

## NAME
skopeo\-inspect - Return low-level information about _image-name_ in a registry

## SYNOPSIS
**skopeo inspect** [**--raw**] _image-name_

Return low-level information about _image-name_ in a registry

**--raw** output the raw manifest; the default is to format the output as JSON

_image-name_ name of the image to retrieve information about

**--authfile** _path_

Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `podman login`.
If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.

**--creds** _username[:password]_ for accessing the registry

**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry

**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)

## EXAMPLES

To review information for the image fedora from the docker.io registry:
```sh
$ skopeo inspect docker://docker.io/fedora
{
    "Name": "docker.io/library/fedora",
    "Digest": "sha256:a97914edb6ba15deb5c5acf87bd6bd5b6b0408c96f48a5cbd450b5b04509bb7d",
    "RepoTags": [
        "20",
        "21",
        "22",
        "23",
        "24",
        "heisenbug",
        "latest",
        "rawhide"
    ],
    "Created": "2016-06-20T19:33:43.220526898Z",
    "DockerVersion": "1.10.3",
    "Labels": {},
    "Architecture": "amd64",
    "Os": "linux",
    "Layers": [
        "sha256:7c91a140e7a1025c3bc3aace4c80c0d9933ac4ee24b8630a6b0b5d8b9ce6b9d4"
    ]
}
```

## SEE ALSO
skopeo(1), podman-login(1), docker-login(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
docs/skopeo-manifest-digest.1.md (new file, 26 lines)

% skopeo-manifest-digest(1)

## NAME
skopeo\-manifest\-digest - Compute a manifest digest of a manifest file and write it to standard output.

## SYNOPSIS
**skopeo manifest-digest** _manifest-file_

## DESCRIPTION

Compute a manifest digest of _manifest-file_ and write it to standard output.

## EXAMPLES

```sh
$ skopeo manifest-digest manifest.json
sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6
```

## SEE ALSO
skopeo(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
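Since the digest shown is sha256 over the raw manifest bytes, the output should agree with a plain checksum of the file; a quick cross-check (assuming the sha256 default):

```sh
sha256sum manifest.json
# a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6  manifest.json
```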
docs/skopeo-standalone-sign.1.md (new file, 34 lines)

% skopeo-standalone-sign(1)

## NAME
skopeo\-standalone\-sign - Sign an image

## SYNOPSIS
**skopeo standalone-sign** _manifest docker-reference key-fingerprint_ **--output**|**-o** _signature_

## DESCRIPTION
This is primarily a debugging tool, or useful for special cases,
and usually should not be a part of your normal operational workflow; use `skopeo copy --sign-by` instead to publish and sign an image in one step.

_manifest_ Path to a file containing the image manifest

_docker-reference_ A docker reference to identify the image with

_key-fingerprint_ Key identity to use for signing

**--output**|**-o** output file

## EXAMPLES

```sh
$ skopeo standalone-sign busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 --output busybox.signature
$
```

## SEE ALSO
skopeo(1), skopeo-copy(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
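The key fingerprint argument can be looked up from the local GPG keyring; a hedged one-liner (the key's e-mail address is an assumption):

```sh
gpg --list-keys --with-colons dev@example.com | awk -F: '/^fpr/ {print $10; exit}'
```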
docs/skopeo-standalone-verify.1.md (new file, 36 lines)

% skopeo-standalone-verify(1)

## NAME
skopeo\-standalone\-verify - Verify an image signature

## SYNOPSIS
**skopeo standalone-verify** _manifest docker-reference key-fingerprint signature_

## DESCRIPTION

Verify a signature using local files; the digest will be printed on success.

_manifest_ Path to a file containing the image manifest

_docker-reference_ A docker reference expected to identify the image in the signature

_key-fingerprint_ Expected identity of the signing key

_signature_ Path to the signature file

**Note:** If you do use this, make sure that the image can not be changed at the source location between the time of its verification and its use.

## EXAMPLES

```sh
$ skopeo standalone-verify busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 busybox.signature
Signature verified, digest sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55
```

## SEE ALSO
skopeo(1)

## AUTHORS

Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
docs/skopeo.1.md: 239 lines changed

@@ -1,11 +1,13 @@
 % SKOPEO(1) Skopeo Man Pages
 % Jhon Honce
 % August 2016
-# NAME
+## NAME
 skopeo -- Command line utility used to interact with local and remote container images and container image registries
-# SYNOPSIS
+
+## SYNOPSIS
 **skopeo** [_global options_] _command_ [_command options_]
-# DESCRIPTION
+
+## DESCRIPTION
 `skopeo` is a command line utility providing various operations with container images and container image registries.

 `skopeo` can copy container images between various containers image stores, converting them as necessary. For example you can use `skopeo` to copy container images from one container registry to another.
@@ -31,7 +33,7 @@ Most commands refer to container images, using a _transport_`:`_details_ format.
 An existing local directory _path_ storing the manifest, layer tarballs and signatures as individual files. This is a non-standardized format, primarily useful for debugging or noninvasive container inspection.

 **docker://**_docker-reference_
-An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in either `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(kpod login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.
+An image in a registry implementing the "Docker Registry HTTP API V2". By default, uses the authorization state in either `$XDG_RUNTIME_DIR/containers/auth.json`, which is set using `(podman login)`. If the authorization state is not found there, `$HOME/.docker/config.json` is checked, which is set using `(docker login)`.

 **docker-archive:**_path_[**:**_docker-reference_]
 An image is stored in the `docker save` formatted file. _docker-reference_ is only used when creating such a file, and it must not contain a digest.
@@ -45,7 +47,7 @@ Most commands refer to container images, using a _transport_`:`_details_ format.
 **ostree:**_image_[**@**_/absolute/repo/path_]
 An image in a local OSTree repository. _/absolute/repo/path_ defaults to _/ostree/repo_.

-# OPTIONS
+## OPTIONS

 **--debug** enable debug output

@@ -65,139 +67,18 @@ Most commands refer to container images, using a _transport_`:`_details_ format.

 **--version**|**-v** print the version number

-# COMMANDS
+## COMMANDS

-## skopeo copy
-**skopeo copy** [**--sign-by=**_key-ID_] _source-image destination-image_
+| Command | Description |
+| ----------------------------------------- | ------------------------------------------------------------------------------ |
+| [skopeo-copy(1)](skopeo-copy.1.md) | Copy an image (manifest, filesystem layers, signatures) from one location to another. |
+| [skopeo-delete(1)](skopeo-delete.1.md) | Mark image-name for deletion. |
+| [skopeo-inspect(1)](skopeo-inspect.1.md) | Return low-level information about image-name in a registry. |
+| [skopeo-manifest-digest(1)](skopeo-manifest-digest.1.md) | Compute a manifest digest of manifest-file and write it to standard output. |
+| [skopeo-standalone-sign(1)](skopeo-standalone-sign.1.md) | Sign an image. |
+| [skopeo-standalone-verify(1)](skopeo-standalone-verify.1.md) | Verify an image. |

-Copy an image (manifest, filesystem layers, signatures) from one location to another.
-
-Uses the system's trust policy to validate images, rejects images not trusted by the policy.
-
-_source-image_ use the "image name" format described above
-
-_destination-image_ use the "image name" format described above
-
-**--authfile** _path_
-
-Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `kpod login`.
-If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
-
-**--format, -f** _manifest-type_ Manifest type (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)
-
-**--remove-signatures** do not copy signatures, if any, from _source-image_. Necessary when copying a signed image to a destination which does not support signatures.
-
-**--sign-by=**_key-id_ add a signature using that key ID for an image name corresponding to _destination-image_
-
-**--src-creds** _username[:password]_ for accessing the source registry
-
-**--dest-compress** _bool-value_ Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)
-
-**--dest-creds** _username[:password]_ for accessing the destination registry
-
-**--src-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the source registry or daemon
-
-**--src-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container source registry or daemon (defaults to true)
-
-**--dest-cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the destination registry or daemon
-
-**--dest-ostree-tmp-dir** _path_ Directory to use for OSTree temporary files.
-
-**--dest-tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container destination registry or daemon (defaults to true)
-
-**--src-daemon-host** _host_ Copy from docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).
-
-**--dest-daemon-host** _host_ Copy to docker daemon at _host_. If _host_ starts with `tcp://`, HTTPS is enabled by default. To use plain HTTP, use the form `http://` (default is `unix:///var/run/docker.sock`).
-
-Existing signatures, if any, are preserved as well.
-
-## skopeo delete
-**skopeo delete** _image-name_
-
-Mark _image-name_ for deletion. To release the allocated disk space, you must login to the container registry server and execute the container registry garbage collector. E.g.,
-
-```
-/usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
-
-Note: sometimes the config.yml is stored in /etc/docker/registry/config.yml
-
-If you are running the container registry inside of a container you would execute something like:
-
-$ docker exec -it registry /usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml
-
-```
-
-**--authfile** _path_
-
-Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `kpod login`.
-If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
-
-**--creds** _username[:password]_ for accessing the registry
-
-**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry
-
-**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
-
-Additionally, the registry must allow deletions by setting `REGISTRY_STORAGE_DELETE_ENABLED=true` for the registry daemon.
-
-## skopeo inspect
-**skopeo inspect** [**--raw**] _image-name_
-
-Return low-level information about _image-name_ in a registry
-
-**--raw** output raw manifest, default is to format in JSON
-
-_image-name_ name of image to retrieve information about
-
-**--authfile** _path_
-
-Path of the authentication file. Default is ${XDG_RUNTIME\_DIR}/containers/auth.json, which is set using `kpod login`.
-If the authorization state is not found there, $HOME/.docker/config.json is checked, which is set using `docker login`.
-
-**--creds** _username[:password]_ for accessing the registry
-
-**--cert-dir** _path_ Use certificates at _path_ (*.crt, *.cert, *.key) to connect to the registry
-
-**--tls-verify** _bool-value_ Require HTTPS and verify certificates when talking to container registries (defaults to true)
-
-## skopeo manifest-digest
-**skopeo manifest-digest** _manifest-file_
-
-Compute a manifest digest of _manifest-file_ and write it to standard output.
-
-## skopeo standalone-sign
-**skopeo standalone-sign** _manifest docker-reference key-fingerprint_ **--output**|**-o** _signature_
-
-This is primarily a debugging tool, or useful for special cases,
-and usually should not be a part of your normal operational workflow; use `skopeo copy --sign-by` instead to publish and sign an image in one step.
-
-_manifest_ Path to a file containing the image manifest
-
-_docker-reference_ A docker reference to identify the image with
-
-_key-fingerprint_ Key identity to use for signing
-
-**--output**|**-o** output file
-
-## skopeo standalone-verify
-**skopeo standalone-verify** _manifest docker-reference key-fingerprint signature_
-
-Verify a signature using local files, digest will be printed on success.
-
-_manifest_ Path to a file containing the image manifest
-
-_docker-reference_ A docker reference expected to identify the image in the signature
-
-_key-fingerprint_ Expected identity of the signing key
-
-_signature_ Path to signature file
-
-**Note:** If you do use this, make sure that the image can not be changed at the source location between the times of its verification and use.
-
 ## skopeo help
 show help for `skopeo`

-# FILES
+## FILES
 **/etc/containers/policy.json**
 Default trust policy file, if **--policy** is not specified.
 The policy format is documented in https://github.com/containers/image/blob/master/docs/policy.json.md .
@@ -206,89 +87,9 @@ show help for `skopeo`
 Default directory containing registry configuration, if **--registries.d** is not specified.
 The contents of this directory are documented in https://github.com/containers/image/blob/master/docs/registries.d.md .

-# EXAMPLES
+## SEE ALSO
+podman-login(1), docker-login(1)

-## skopeo copy
-To copy the layers of the docker.io busybox image to a local directory:
-```sh
-$ mkdir -p /var/lib/images/busybox
-$ skopeo copy docker://busybox:latest dir:/var/lib/images/busybox
-$ ls /var/lib/images/busybox/*
-/tmp/busybox/2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749.tar
-/tmp/busybox/manifest.json
-/tmp/busybox/8ddc19f16526912237dd8af81971d5e4dd0587907234be2b83e249518d5b673f.tar
-```
-
-To copy and sign an image:
-
-```sh
-$ skopeo copy --sign-by dev@example.com atomic:example/busybox:streaming atomic:example/busybox:gold
-```
-## skopeo delete
-Mark image example/pause for deletion from the registry.example.com registry:
-```sh
-$ skopeo delete --force docker://registry.example.com/example/pause:latest
-```
-See above for additional details on using the command **delete**.
-
-## skopeo inspect
-To review information for the image fedora from the docker.io registry:
-```sh
-$ skopeo inspect docker://docker.io/fedora
-{
-    "Name": "docker.io/library/fedora",
-    "Digest": "sha256:a97914edb6ba15deb5c5acf87bd6bd5b6b0408c96f48a5cbd450b5b04509bb7d",
-    "RepoTags": [
-        "20",
-        "21",
-        "22",
-        "23",
-        "24",
-        "heisenbug",
-        "latest",
-        "rawhide"
-    ],
-    "Created": "2016-06-20T19:33:43.220526898Z",
-    "DockerVersion": "1.10.3",
-    "Labels": {},
-    "Architecture": "amd64",
-    "Os": "linux",
-    "Layers": [
-        "sha256:7c91a140e7a1025c3bc3aace4c80c0d9933ac4ee24b8630a6b0b5d8b9ce6b9d4"
-    ]
-}
-```
-## skopeo layers
-Another method to retrieve the layers for the busybox image from the docker.io registry:
-```sh
-$ skopeo layers docker://busybox
-$ ls layers-500650331/
-8ddc19f16526912237dd8af81971d5e4dd0587907234be2b83e249518d5b673f.tar
-manifest.json
-a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4.tar
-```
-## skopeo manifest-digest
-```sh
-$ skopeo manifest-digest manifest.json
-sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6
-```
-## skopeo standalone-sign
-```sh
-$ skopeo standalone-sign busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 --output busybox.signature
-$
-```
-
-See `skopeo copy` above for the preferred method of signing images.
-## skopeo standalone-verify
-```sh
-$ skopeo standalone-verify busybox-manifest.json registry.example.com/example/busybox 1D8230F6CDB6A06716E414C1DB72F2188BB46CC8 busybox.signature
-Signature verified, digest sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55
-```
-
-# SEE ALSO
-kpod-login(1), docker-login(1)
-
-# AUTHORS
+## AUTHORS

 Antonio Murdaca <runcom@redhat.com>, Miloslav Trmac <mitr@redhat.com>, Jhon Honce <jhonce@redhat.com>
hack/btrfs_installed_tag.sh (new executable file, 7 lines)

#!/bin/bash
cc -E - > /dev/null 2> /dev/null << EOF
#include <btrfs/ioctl.h>
EOF
if test $? -ne 0 ; then
	echo exclude_graphdriver_btrfs
fi
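Together with the other hack/*_tag.sh scripts, the emitted words become Go build tags, roughly as the Makefile's LOCAL_BUILD_TAGS wiring above does; a sketch (the package path is an assumption):

```sh
go build -tags "$(hack/btrfs_tag.sh) $(hack/btrfs_installed_tag.sh) $(hack/ostree_tag.sh)" ./cmd/skopeo
```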
hack/ostree_tag.sh (new executable file, 6 lines)

#!/bin/bash
if pkg-config ostree-1 2> /dev/null ; then
	echo ostree
else
	echo containers_image_ostree_stub
fi
hack/tree_status.sh (new executable file, 13 lines)

#!/bin/bash
set -e

STATUS=$(git status --porcelain)
if [[ -z $STATUS ]]
then
	echo "tree is clean"
else
	echo "tree is dirty, please commit all changes and sync the vendor.conf"
	echo ""
	echo "$STATUS"
	exit 1
fi
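This is exactly how the .travis.yml change above uses the script: regenerate the vendor tree and fail the build if anything drifts.

```sh
make vendor && ./hack/tree_status.sh && make check
```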
vendor.conf: 56 lines changed

@@ -1,21 +1,23 @@
 github.com/urfave/cli v1.20.0
+github.com/kr/pretty v0.1.0
+github.com/kr/text v0.1.0
-github.com/containers/image f0cbc16b444d729362c78244c715b45bf4019b71
+github.com/containers/image v1.5
 github.com/vbauerster/mpb v3.3.4
 github.com/mattn/go-isatty v0.0.4
 github.com/VividCortex/ewma v1.1.1
 golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
 github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
 gopkg.in/cheggaaa/pb.v1 v1.0.27
 github.com/mattn/go-runewidth 14207d285c6c197daabb5c9793d63e7af9ab2d50
-github.com/containers/storage master
+github.com/containers/storage v1.11
 github.com/sirupsen/logrus v1.0.0
 github.com/go-check/check v1
 github.com/stretchr/testify v1.1.3
-github.com/davecgh/go-spew master
-github.com/pmezard/go-difflib master
-github.com/pkg/errors master
-golang.org/x/crypto master
+github.com/davecgh/go-spew v1.1.1
+github.com/pmezard/go-difflib 5d4384ee4fb2527b0a1256a821ebfc92f91efefc
+github.com/pkg/errors v0.8.1
+golang.org/x/crypto a4c6cb3142f211c99e4bf4cd769535b29a9b616f
 github.com/ulikunitz/xz v0.5.4
-github.com/boltdb/bolt master
+github.com/boltdb/bolt v1.3.1
 # docker deps from https://github.com/docker/docker/blob/v1.11.2/hack/vendor.sh
 github.com/docker/docker da99009bbb1165d1ac5688b5c81d2f589d418341
 github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6
@@ -24,49 +26,41 @@ github.com/vbatts/tar-split v0.10.2
 github.com/gorilla/context 14f550f51a
 github.com/gorilla/mux e444e69cbd
 github.com/docker/go-units 8a7beacffa3009a9ac66bad506b18ffdd110cf97
-golang.org/x/net master
+golang.org/x/net 45ffb0cd1ba084b73e26dee67e667e1be5acce83
 github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
 # end docker deps
-golang.org/x/text master
-github.com/docker/distribution master
+golang.org/x/text e6919f6577db79269a6443b9dc46d18f2238fb5d
+github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716
 # docker/distributions dependencies
 github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab
 github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564
 github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
 github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
 github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
 github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
 github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
 github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3
 # end of docker/distribution dependencies
-github.com/docker/libtrust master
+github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
-github.com/opencontainers/runc master
+github.com/opencontainers/runc v1.0.0-rc6
 github.com/opencontainers/image-spec 7b1e489870acb042978a3935d2fb76f8a79aff81
 # -- start OCI image validation requirements.
 github.com/opencontainers/runtime-spec v1.0.0
 github.com/opencontainers/image-tools 6d941547fa1df31900990b3fb47ec2468c9c6469
-github.com/xeipuuv/gojsonschema master
-github.com/xeipuuv/gojsonreference master
-github.com/xeipuuv/gojsonpointer master
-go4.org master https://github.com/camlistore/go4
+github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
+github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
+github.com/xeipuuv/gojsonschema v1.1.0
+go4.org ce4c26f7be8eb27dc77f996b08d286dd80bc4a01 https://github.com/camlistore/go4
 github.com/ostreedev/ostree-go 56f3a639dbc0f2f5051c6d52dade28a882ba78ce
 # -- end OCI image validation requirements
-github.com/mtrmac/gpgme master
+github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
 # openshift/origin's k8s dependencies as of OpenShift v1.1.5
-k8s.io/client-go master
+k8s.io/client-go kubernetes-1.10.13-beta.0
 github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee
 gopkg.in/yaml.v2 d466437aa4adc35830964cffc5b5f262c63ddcb4
 github.com/imdario/mergo 6633656539c1639d9d78127b7d47c622b5d7b6dc
 # containers/storage's dependencies that aren't already being pulled in
 github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
 github.com/pborman/uuid v1.0
-github.com/opencontainers/selinux master
+github.com/opencontainers/selinux v1.1
 golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08
 github.com/tchap/go-patricia v2.2.6
-github.com/BurntSushi/toml master
+github.com/BurntSushi/toml v0.2.0
 github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
-github.com/syndtr/gocapability master
+github.com/syndtr/gocapability d98352740cb2c55f81556b63d4a1ec64c5a319c2
 github.com/klauspost/pgzip v1.2.1
 github.com/klauspost/compress v1.4.1
 github.com/klauspost/cpuid v1.2.0
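Each vendor.conf line is an import path, a ref, and an optional clone URL; the CONTRIBUTING.md workflow above swaps the ref and URL to a fork while a cross-repo change is in flight. A hedged sketch reusing the fork example from that text:

```sh
sed -i 's|^github.com/containers/image .*|github.com/containers/image my-branch https://github.com/runcom/image|' vendor.conf
make vendor
```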
vendor/github.com/BurntSushi/toml/COPYING (generated, vendored): 27 lines changed

@@ -1,21 +1,14 @@
-The MIT License (MIT)
+DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+Version 2, December 2004

-Copyright (c) 2013 TOML authors
+Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>

-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
+Everyone is permitted to copy and distribute verbatim or modified
+copies of this license document, and changing it is allowed as long
+as the name is changed.

-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
+DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+0. You just DO WHAT THE FUCK YOU WANT TO.

-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
vendor/github.com/BurntSushi/toml/README.md (generated, vendored): 18 lines changed

@@ -1,17 +1,17 @@
 ## TOML parser and encoder for Go with reflection

 TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
 reflection interface similar to Go's standard library `json` and `xml`
 packages. This package also supports the `encoding.TextUnmarshaler` and
 `encoding.TextMarshaler` interfaces so that you can define custom data
 representations. (There is an example of this below.)

-Spec: https://github.com/toml-lang/toml
+Spec: https://github.com/mojombo/toml

 Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
+[v0.2.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.2.0.md)

-Documentation: https://godoc.org/github.com/BurntSushi/toml
+Documentation: http://godoc.org/github.com/BurntSushi/toml

 Installation:

@@ -26,7 +26,8 @@ go get github.com/BurntSushi/toml/cmd/tomlv
 tomlv some-toml-file.toml
 ```

-[](https://travis-ci.org/BurntSushi/toml) [](https://godoc.org/github.com/BurntSushi/toml)
+[](https://travis-ci.org/BurntSushi/toml)

 ### Testing

@@ -86,7 +87,7 @@ type TOML struct {

 ### Using the `encoding.TextUnmarshaler` interface

 Here's an example that automatically parses duration strings into
 `time.Duration` values:

 ```toml
@@ -119,7 +120,7 @@ for _, s := range favorites.Song {
 }
 ```

 And you'll also need a `duration` type that satisfies the
 `encoding.TextUnmarshaler` interface:

 ```go
@@ -216,3 +217,4 @@ Note that a case insensitive match will be tried if an exact match can't be
 found.

 A working example of the above can be found in `_examples/example.{go,toml}`.
vendor/github.com/BurntSushi/toml/decode.go (generated, vendored): 48 lines changed

@@ -10,9 +10,7 @@ import (
 	"time"
 )

-func e(format string, args ...interface{}) error {
-	return fmt.Errorf("toml: "+format, args...)
-}
+var e = fmt.Errorf

 // Unmarshaler is the interface implemented by objects that can unmarshal a
 // TOML description of themselves.
@@ -105,13 +103,6 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
 // This decoder will not handle cyclic types. If a cyclic type is passed,
 // `Decode` will not terminate.
 func Decode(data string, v interface{}) (MetaData, error) {
-	rv := reflect.ValueOf(v)
-	if rv.Kind() != reflect.Ptr {
-		return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
-	}
-	if rv.IsNil() {
-		return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
-	}
 	p, err := parse(data)
 	if err != nil {
 		return MetaData{}, err
@@ -120,7 +111,7 @@ func Decode(data string, v interface{}) (MetaData, error) {
 		p.mapping, p.types, p.ordered,
 		make(map[string]bool, len(p.ordered)), nil,
 	}
-	return md, md.unify(p.mapping, indirect(rv))
+	return md, md.unify(p.mapping, rvalue(v))
 }

 // DecodeFile is just like Decode, except it will automatically read the
@@ -220,7 +211,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
 	case reflect.Interface:
 		// we only support empty interfaces.
 		if rv.NumMethod() > 0 {
-			return e("unsupported type %s", rv.Type())
+			return e("Unsupported type '%s'.", rv.Kind())
 		}
 		return md.unifyAnything(data, rv)
 	case reflect.Float32:
@@ -228,7 +219,7 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
 	case reflect.Float64:
 		return md.unifyFloat64(data, rv)
 	}
-	return e("unsupported type %s", rv.Kind())
+	return e("Unsupported type '%s'.", rv.Kind())
 }

 func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
@@ -237,8 +228,7 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
 		if mapping == nil {
 			return nil
 		}
-		return e("type mismatch for %s: expected table but found %T",
-			rv.Type().String(), mapping)
+		return mismatch(rv, "map", mapping)
 	}

 	for key, datum := range tmap {
@@ -263,13 +253,14 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
 			md.decoded[md.context.add(key).String()] = true
 			md.context = append(md.context, key)
 			if err := md.unify(datum, subv); err != nil {
-				return err
+				return e("Type mismatch for '%s.%s': %s",
+					rv.Type().String(), f.name, err)
 			}
 			md.context = md.context[0 : len(md.context)-1]
 		} else if f.name != "" {
 			// Bad user! No soup for you!
-			return e("cannot write unexported field %s.%s",
-				rv.Type().String(), f.name)
+			return e("Field '%s.%s' is unexported, and therefore cannot "+
+				"be loaded with reflection.", rv.Type().String(), f.name)
 		}
 	}
 }
@@ -387,15 +378,15 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
 			// No bounds checking necessary.
 		case reflect.Int8:
 			if num < math.MinInt8 || num > math.MaxInt8 {
-				return e("value %d is out of range for int8", num)
+				return e("Value '%d' is out of range for int8.", num)
 			}
 		case reflect.Int16:
 			if num < math.MinInt16 || num > math.MaxInt16 {
-				return e("value %d is out of range for int16", num)
+				return e("Value '%d' is out of range for int16.", num)
 			}
 		case reflect.Int32:
 			if num < math.MinInt32 || num > math.MaxInt32 {
-				return e("value %d is out of range for int32", num)
+				return e("Value '%d' is out of range for int32.", num)
 			}
 		}
 		rv.SetInt(num)
@@ -406,15 +397,15 @@ func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
 			// No bounds checking necessary.
 		case reflect.Uint8:
 			if num < 0 || unum > math.MaxUint8 {
-				return e("value %d is out of range for uint8", num)
+				return e("Value '%d' is out of range for uint8.", num)
 			}
 		case reflect.Uint16:
 			if num < 0 || unum > math.MaxUint16 {
-				return e("value %d is out of range for uint16", num)
+				return e("Value '%d' is out of range for uint16.", num)
 			}
 		case reflect.Uint32:
 			if num < 0 || unum > math.MaxUint32 {
-				return e("value %d is out of range for uint32", num)
+				return e("Value '%d' is out of range for uint32.", num)
 			}
 		}
 		rv.SetUint(unum)
@@ -480,7 +471,7 @@ func rvalue(v interface{}) reflect.Value {
 // interest to us (like encoding.TextUnmarshaler).
 func indirect(v reflect.Value) reflect.Value {
 	if v.Kind() != reflect.Ptr {
-		if v.CanSet() {
+		if v.CanAddr() {
 			pv := v.Addr()
 			if _, ok := pv.Interface().(TextUnmarshaler); ok {
 				return pv
@@ -505,5 +496,10 @@ func isUnifiable(rv reflect.Value) bool {
 }

 func badtype(expected string, data interface{}) error {
-	return e("cannot load TOML value of type %T into a Go %s", data, expected)
+	return e("Expected %s but found '%T'.", expected, data)
+}
+
+func mismatch(user reflect.Value, expected string, data interface{}) error {
+	return e("Type mismatch for %s. Expected %s but found '%T'.",
+		user.Type().String(), expected, data)
 }
vendor/github.com/BurntSushi/toml/decode_meta.go (generated, vendored): 3 lines changed

@@ -77,8 +77,9 @@ func (k Key) maybeQuoted(i int) string {
 	}
 	if quote {
 		return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
+	} else {
+		return k[i]
 	}
-	return k[i]
 }

 func (k Key) add(piece string) Key {
vendor/github.com/BurntSushi/toml/doc.go (generated, vendored): 2 lines changed

@@ -4,7 +4,7 @@ files via reflection. There is also support for delaying decoding with
 the Primitive type, and querying the set of keys in a TOML document with the
 MetaData type.

-The specification implemented: https://github.com/toml-lang/toml
+The specification implemented: https://github.com/mojombo/toml

 The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
 whether a file is a valid TOML document. It can also be used to print the
81
vendor/github.com/BurntSushi/toml/encode.go
generated
vendored
81
vendor/github.com/BurntSushi/toml/encode.go
generated
vendored
@@ -16,17 +16,17 @@ type tomlEncodeError struct{ error }

var (
errArrayMixedElementTypes = errors.New(
"toml: cannot encode array with mixed element types")
"can't encode array with mixed element types")
errArrayNilElement = errors.New(
"toml: cannot encode array with nil element")
"can't encode array with nil element")
errNonString = errors.New(
"toml: cannot encode a map with non-string key type")
"can't encode a map with non-string key type")
errAnonNonStruct = errors.New(
"toml: cannot encode an anonymous field that is not a struct")
"can't encode an anonymous field that is not a struct")
errArrayNoTable = errors.New(
"toml: TOML array element cannot contain a table")
"TOML array element can't contain a table")
errNoKey = errors.New(
"toml: top-level values must be Go maps or structs")
"top-level values must be a Go map or struct")
errAnything = errors.New("") // used in testing
)

@@ -148,7 +148,7 @@ func (enc *Encoder) encode(key Key, rv reflect.Value) {
case reflect.Struct:
enc.eTable(key, rv)
default:
panic(e("unsupported type for key '%s': %s", key, k))
panic(e("Unsupported type for key '%s': %s", key, k))
}
}

@@ -160,7 +160,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
// Special case time.Time as a primitive. Has to come before
// TextMarshaler below because time.Time implements
// encoding.TextMarshaler, but we need to always use UTC.
enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z"))
return
case TextMarshaler:
// Special case. Use text marshaler if it's available for this value.
@@ -191,7 +191,7 @@ func (enc *Encoder) eElement(rv reflect.Value) {
case reflect.String:
enc.writeQuoted(rv.String())
default:
panic(e("unexpected primitive type: %s", rv.Kind()))
panic(e("Unexpected primitive type: %s", rv.Kind()))
}
}

@@ -241,7 +241,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
panicIfInvalidKey(key)
if len(key) == 1 {
// Output an extra newline between top-level tables.
// Output an extra new line between top-level tables.
// (The newline isn't written if nothing else has been written though.)
enc.newline()
}
@@ -315,16 +315,10 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
t := f.Type
switch t.Kind() {
case reflect.Struct:
// Treat anonymous struct fields with
// tag names as though they are not
// anonymous, like encoding/json does.
if getOptions(f.Tag).name == "" {
addFields(t, frv, f.Index)
continue
}
addFields(t, frv, f.Index)
continue
case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct &&
getOptions(f.Tag).name == "" {
if t.Elem().Kind() == reflect.Struct {
if !frv.IsNil() {
addFields(t.Elem(), frv.Elem(), f.Index)
}
@@ -353,18 +347,17 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
continue
}

opts := getOptions(sft.Tag)
if opts.skip {
tag := sft.Tag.Get("toml")
if tag == "-" {
continue
}
keyName := sft.Name
if opts.name != "" {
keyName = opts.name
keyName, opts := getOptions(tag)
if keyName == "" {
keyName = sft.Name
}
if opts.omitempty && isEmpty(sf) {
if _, ok := opts["omitempty"]; ok && isEmpty(sf) {
continue
}
if opts.omitzero && isZero(sf) {
} else if _, ok := opts["omitzero"]; ok && isZero(sf) {
continue
}

@@ -399,8 +392,9 @@ func tomlTypeOfGo(rv reflect.Value) tomlType {
case reflect.Array, reflect.Slice:
if typeEqual(tomlHash, tomlArrayType(rv)) {
return tomlArrayHash
} else {
return tomlArray
}
return tomlArray
case reflect.Ptr, reflect.Interface:
return tomlTypeOfGo(rv.Elem())
case reflect.String:
@@ -457,30 +451,17 @@ func tomlArrayType(rv reflect.Value) tomlType {
return firstType
}

type tagOptions struct {
skip bool // "-"
name string
omitempty bool
omitzero bool
}

func getOptions(tag reflect.StructTag) tagOptions {
t := tag.Get("toml")
if t == "-" {
return tagOptions{skip: true}
}
var opts tagOptions
parts := strings.Split(t, ",")
opts.name = parts[0]
for _, s := range parts[1:] {
switch s {
case "omitempty":
opts.omitempty = true
case "omitzero":
opts.omitzero = true
func getOptions(keyName string) (string, map[string]struct{}) {
opts := make(map[string]struct{})
ss := strings.Split(keyName, ",")
name := ss[0]
if len(ss) > 1 {
for _, opt := range ss {
opts[opt] = struct{}{}
}
}
return opts

return name, opts
}

func isZero(rv reflect.Value) bool {
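As a rough illustration of what the getOptions hunk above is parsing, here is a small sketch of the `toml` struct-tag syntax both implementations handle: a key name, optional `omitempty`/`omitzero` flags, and `-` to skip a field. The struct, field names, and values below are invented, not part of the diff, and the omitzero/omitempty behavior shown is that of the newer tagOptions-based version.

```go
package main

import (
	"os"

	"github.com/BurntSushi/toml"
)

// Config is invented for illustration; its tags exercise the cases the
// getOptions implementations above distinguish.
type Config struct {
	Host   string `toml:"host"`            // key renamed to "host"
	Port   int    `toml:"port,omitzero"`   // omitted when zero (newer version)
	Notes  string `toml:"notes,omitempty"` // omitted when empty
	Secret string `toml:"-"`               // never encoded
}

func main() {
	cfg := Config{Host: "example.com"}
	// With the newer tagOptions parser this should emit only
	// `host = "example.com"`: Port is zero, Notes is empty, Secret is skipped.
	if err := toml.NewEncoder(os.Stdout).Encode(cfg); err != nil {
		panic(err)
	}
}
```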
494 changes: vendor/github.com/BurntSushi/toml/lex.go (generated, vendored)
@@ -3,7 +3,6 @@ package toml
import (
"fmt"
"strings"
"unicode"
"unicode/utf8"
)

@@ -30,28 +29,24 @@ const (
itemArrayTableEnd
itemKeyStart
itemCommentStart
itemInlineTableStart
itemInlineTableEnd
)

const (
eof = 0
comma = ','
tableStart = '['
tableEnd = ']'
arrayTableStart = '['
arrayTableEnd = ']'
tableSep = '.'
keySep = '='
arrayStart = '['
arrayEnd = ']'
commentStart = '#'
stringStart = '"'
stringEnd = '"'
rawStringStart = '\''
rawStringEnd = '\''
inlineTableStart = '{'
inlineTableEnd = '}'
eof = 0
tableStart = '['
tableEnd = ']'
arrayTableStart = '['
arrayTableEnd = ']'
tableSep = '.'
keySep = '='
arrayStart = '['
arrayEnd = ']'
arrayValTerm = ','
commentStart = '#'
stringStart = '"'
stringEnd = '"'
rawStringStart = '\''
rawStringEnd = '\''
)

type stateFn func(lx *lexer) stateFn
@@ -60,18 +55,11 @@ type lexer struct {
input string
start int
pos int
width int
line int
state stateFn
items chan item

// Allow for backing up up to three runes.
// This is necessary because TOML contains 3-rune tokens (""" and ''').
prevWidths [3]int
nprev int // how many of prevWidths are in use
// If we emit an eof, we can still back up, but it is not OK to call
// next again.
atEOF bool

// A stack of state functions used to maintain context.
// The idea is to reuse parts of the state machine in various places.
// For example, values can appear at the top level or within arbitrarily
@@ -99,7 +87,7 @@ func (lx *lexer) nextItem() item {

func lex(input string) *lexer {
lx := &lexer{
input: input,
input: input + "\n",
state: lexTop,
line: 1,
items: make(chan item, 10),
@@ -114,7 +102,7 @@ func (lx *lexer) push(state stateFn) {

func (lx *lexer) pop() stateFn {
if len(lx.stack) == 0 {
return lx.errorf("BUG in lexer: no states to pop")
return lx.errorf("BUG in lexer: no states to pop.")
}
last := lx.stack[len(lx.stack)-1]
lx.stack = lx.stack[0 : len(lx.stack)-1]
@@ -136,25 +124,16 @@ func (lx *lexer) emitTrim(typ itemType) {
}

func (lx *lexer) next() (r rune) {
if lx.atEOF {
panic("next called after EOF")
}
if lx.pos >= len(lx.input) {
lx.atEOF = true
lx.width = 0
return eof
}

if lx.input[lx.pos] == '\n' {
lx.line++
}
lx.prevWidths[2] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[0]
if lx.nprev < 3 {
lx.nprev++
}
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
lx.prevWidths[0] = w
lx.pos += w
r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
lx.pos += lx.width
return r
}

@@ -163,20 +142,9 @@ func (lx *lexer) ignore() {
lx.start = lx.pos
}

// backup steps back one rune. Can be called only twice between calls to next.
// backup steps back one rune. Can be called only once per call of next.
func (lx *lexer) backup() {
if lx.atEOF {
lx.atEOF = false
return
}
if lx.nprev < 1 {
panic("backed up too far")
}
w := lx.prevWidths[0]
lx.prevWidths[0] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[2]
lx.nprev--
lx.pos -= w
lx.pos -= lx.width
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
lx.line--
}
@@ -198,22 +166,9 @@ func (lx *lexer) peek() rune {
return r
}

// skip ignores all input that matches the given predicate.
func (lx *lexer) skip(pred func(rune) bool) {
for {
r := lx.next()
if pred(r) {
continue
}
lx.backup()
lx.ignore()
return
}
}

// errorf stops all lexing by emitting an error and returning `nil`.
// Note that any value that is a character is escaped if it's a special
// character (newlines, tabs, etc.).
// character (new lines, tabs, etc.).
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
lx.items <- item{
itemError,
@@ -229,6 +184,7 @@ func lexTop(lx *lexer) stateFn {
if isWhitespace(r) || isNL(r) {
return lexSkip(lx, lexTop)
}

switch r {
case commentStart:
lx.push(lexTop)
@@ -237,7 +193,7 @@ func lexTop(lx *lexer) stateFn {
return lexTableStart
case eof:
if lx.pos > lx.start {
return lx.errorf("unexpected EOF")
return lx.errorf("Unexpected EOF.")
}
lx.emit(itemEOF)
return nil
@@ -252,12 +208,12 @@ func lexTop(lx *lexer) stateFn {

// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
// upon a newline. If it sees EOF, it will quit the lexer successfully.
// upon a new line. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case r == commentStart:
// a comment will read to a newline for us.
// a comment will read to a new line for us.
lx.push(lexTop)
return lexCommentStart
case isWhitespace(r):
@@ -266,11 +222,11 @@ func lexTopEnd(lx *lexer) stateFn {
lx.ignore()
return lexTop
case r == eof:
lx.emit(itemEOF)
return nil
lx.ignore()
return lexTop
}
return lx.errorf("expected a top-level item to end with a newline, "+
"comment, or EOF, but got %q instead", r)
return lx.errorf("Expected a top-level item to end with a new line, "+
"comment or EOF, but got %q instead.", r)
}

// lexTable lexes the beginning of a table. Namely, it makes sure that
@@ -297,22 +253,21 @@ func lexTableEnd(lx *lexer) stateFn {

func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd {
return lx.errorf("expected end of table array name delimiter %q, "+
"but got %q instead", arrayTableEnd, r)
return lx.errorf("Expected end of table array name delimiter %q, "+
"but got %q instead.", arrayTableEnd, r)
}
lx.emit(itemArrayTableEnd)
return lexTopEnd
}

func lexTableNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
case r == tableEnd || r == eof:
return lx.errorf("unexpected end of table name " +
"(table names cannot be empty)")
return lx.errorf("Unexpected end of table name. (Table names cannot " +
"be empty.)")
case r == tableSep:
return lx.errorf("unexpected table separator " +
"(table names cannot be empty)")
return lx.errorf("Unexpected table separator. (Table names cannot " +
"be empty.)")
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.push(lexTableNameEnd)
@@ -322,22 +277,24 @@ func lexTableNameStart(lx *lexer) stateFn {
}
}

// lexBareTableName lexes the name of a table. It assumes that at least one
// lexTableName lexes the name of a table. It assumes that at least one
// valid character for the table has already been read.
func lexBareTableName(lx *lexer) stateFn {
r := lx.next()
if isBareKeyChar(r) {
switch r := lx.next(); {
case isBareKeyChar(r):
return lexBareTableName
case r == tableSep || r == tableEnd:
lx.backup()
lx.emitTrim(itemText)
return lexTableNameEnd
default:
return lx.errorf("Bare keys cannot contain %q.", r)
}
lx.backup()
lx.emit(itemText)
return lexTableNameEnd
}

// lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace.
func lexTableNameEnd(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.next(); {
case isWhitespace(r):
return lexTableNameEnd
@@ -347,8 +304,8 @@ func lexTableNameEnd(lx *lexer) stateFn {
case r == tableEnd:
return lx.pop()
default:
return lx.errorf("expected '.' or ']' to end table name, "+
"but got %q instead", r)
return lx.errorf("Expected '.' or ']' to end table name, but got %q "+
"instead.", r)
}
}

@@ -358,7 +315,7 @@ func lexKeyStart(lx *lexer) stateFn {
r := lx.peek()
switch {
case r == keySep:
return lx.errorf("unexpected key separator %q", keySep)
return lx.errorf("Unexpected key separator %q.", keySep)
case isWhitespace(r) || isNL(r):
lx.next()
return lexSkip(lx, lexKeyStart)
@@ -381,15 +338,14 @@ func lexBareKey(lx *lexer) stateFn {
case isBareKeyChar(r):
return lexBareKey
case isWhitespace(r):
lx.backup()
lx.emit(itemText)
lx.emitTrim(itemText)
return lexKeyEnd
case r == keySep:
lx.backup()
lx.emit(itemText)
lx.emitTrim(itemText)
return lexKeyEnd
default:
return lx.errorf("bare keys cannot contain %q", r)
return lx.errorf("Bare keys cannot contain %q.", r)
}
}

@@ -402,7 +358,7 @@ func lexKeyEnd(lx *lexer) stateFn {
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
default:
return lx.errorf("expected key separator %q, but got %q instead",
return lx.errorf("Expected key separator %q, but got %q instead.",
keySep, r)
}
}
@@ -411,26 +367,20 @@ func lexKeyEnd(lx *lexer) stateFn {
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the next is popped and returned.
func lexValue(lx *lexer) stateFn {
// We allow whitespace to precede a value, but NOT newlines.
// In array syntax, the array states are responsible for ignoring newlines.
// We allow whitespace to precede a value, but NOT new lines.
// In array syntax, the array states are responsible for ignoring new
// lines.
r := lx.next()
switch {
case isWhitespace(r):
if isWhitespace(r) {
return lexSkip(lx, lexValue)
case isDigit(r):
lx.backup() // avoid an extra state and use the same as above
return lexNumberOrDateStart
}
switch r {
case arrayStart:

switch {
case r == arrayStart:
lx.ignore()
lx.emit(itemArray)
return lexArrayValue
case inlineTableStart:
lx.ignore()
lx.emit(itemInlineTableStart)
return lexInlineTableValue
case stringStart:
case r == stringStart:
if lx.accept(stringStart) {
if lx.accept(stringStart) {
lx.ignore() // Ignore """
@@ -440,7 +390,7 @@ func lexValue(lx *lexer) stateFn {
}
lx.ignore() // ignore the '"'
return lexString
case rawStringStart:
case r == rawStringStart:
if lx.accept(rawStringStart) {
if lx.accept(rawStringStart) {
lx.ignore() // Ignore """
@@ -450,24 +400,23 @@ func lexValue(lx *lexer) stateFn {
}
lx.ignore() // ignore the "'"
return lexRawString
case '+', '-':
case r == 't':
return lexTrue
case r == 'f':
return lexFalse
case r == '-':
return lexNumberStart
case '.': // special error case, be kind to users
return lx.errorf("floats must start with a digit, not '.'")
case isDigit(r):
lx.backup() // avoid an extra state and use the same as above
return lexNumberOrDateStart
case r == '.': // special error case, be kind to users
return lx.errorf("Floats must start with a digit, not '.'.")
}
if unicode.IsLetter(r) {
// Be permissive here; lexBool will give a nice error if the
// user wrote something like
// x = foo
// (i.e. not 'true' or 'false' but is something else word-like.)
lx.backup()
return lexBool
}
return lx.errorf("expected value but found %q instead", r)
return lx.errorf("Expected value but found %q instead.", r)
}

// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and newlines are ignored.
// have already been consumed. All whitespace and new lines are ignored.
func lexArrayValue(lx *lexer) stateFn {
r := lx.next()
switch {
@@ -476,11 +425,10 @@ func lexArrayValue(lx *lexer) stateFn {
case r == commentStart:
lx.push(lexArrayValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == arrayValTerm:
return lx.errorf("Unexpected array value terminator %q.",
arrayValTerm)
case r == arrayEnd:
// NOTE(caleb): The spec isn't clear about whether you can have
// a trailing comma or not, so we'll allow it.
return lexArrayEnd
}

@@ -489,9 +437,8 @@ func lexArrayValue(lx *lexer) stateFn {
return lexValue
}

// lexArrayValueEnd consumes everything between the end of an array value and
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
// lexArrayValueEnd consumes the cruft between values of an array. Namely,
// it ignores whitespace and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
@@ -500,88 +447,31 @@ func lexArrayValueEnd(lx *lexer) stateFn {
case r == commentStart:
lx.push(lexArrayValueEnd)
return lexCommentStart
case r == comma:
case r == arrayValTerm:
lx.ignore()
return lexArrayValue // move on to the next value
case r == arrayEnd:
return lexArrayEnd
}
return lx.errorf(
"expected a comma or array terminator %q, but got %q instead",
arrayEnd, r,
)
return lx.errorf("Expected an array value terminator %q or an array "+
"terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r)
}

// lexArrayEnd finishes the lexing of an array.
// It assumes that a ']' has just been consumed.
// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
// just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemArrayEnd)
return lx.pop()
}

// lexInlineTableValue consumes one key/value pair in an inline table.
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
func lexInlineTableValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == inlineTableEnd:
return lexInlineTableEnd
}
lx.backup()
lx.push(lexInlineTableValueEnd)
return lexKeyStart
}

// lexInlineTableValueEnd consumes everything between the end of an inline table
// key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValueEnd)
return lexCommentStart
case r == comma:
lx.ignore()
return lexInlineTableValue
case r == inlineTableEnd:
return lexInlineTableEnd
}
return lx.errorf("expected a comma or an inline table terminator %q, "+
"but got %q instead", inlineTableEnd, r)
}

// lexInlineTableEnd finishes the lexing of an inline table.
// It assumes that a '}' has just been consumed.
func lexInlineTableEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemInlineTableEnd)
return lx.pop()
}

// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("strings cannot contain newlines")
return lx.errorf("Strings cannot contain new lines.")
case r == '\\':
lx.push(lexString)
return lexStringEscape
@@ -598,12 +488,11 @@ func lexString(lx *lexer) stateFn {
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case '\\':
r := lx.next()
switch {
case r == '\\':
return lexMultilineStringEscape
case stringEnd:
case r == stringEnd:
if lx.accept(stringEnd) {
if lx.accept(stringEnd) {
lx.backup()
@@ -627,10 +516,8 @@ func lexMultilineString(lx *lexer) stateFn {
func lexRawString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("strings cannot contain newlines")
return lx.errorf("Strings cannot contain new lines.")
case r == rawStringEnd:
lx.backup()
lx.emit(itemRawString)
@@ -642,13 +529,12 @@ func lexRawString(lx *lexer) stateFn {
}

// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'''" has already been consumed and
// a string. It assumes that the beginning "'" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case rawStringEnd:
r := lx.next()
switch {
case r == rawStringEnd:
if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) {
lx.backup()
@@ -673,10 +559,11 @@ func lexMultilineStringEscape(lx *lexer) stateFn {
// Handle the special case first:
if isNL(lx.next()) {
return lexMultilineString
} else {
lx.backup()
lx.push(lexMultilineString)
return lexStringEscape(lx)
}
lx.backup()
lx.push(lexMultilineString)
return lexStringEscape(lx)
}

func lexStringEscape(lx *lexer) stateFn {
@@ -701,9 +588,10 @@ func lexStringEscape(lx *lexer) stateFn {
case 'U':
return lexLongUnicodeEscape
}
return lx.errorf("invalid escape character %q; only the following "+
return lx.errorf("Invalid escape character %q. Only the following "+
"escape characters are allowed: "+
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
"\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+
"\\uXXXX and \\UXXXXXXXX.", r)
}

func lexShortUnicodeEscape(lx *lexer) stateFn {
@@ -711,8 +599,8 @@ func lexShortUnicodeEscape(lx *lexer) stateFn {
for i := 0; i < 4; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf(`expected four hexadecimal digits after '\u', `+
"but got %q instead", lx.current())
return lx.errorf("Expected four hexadecimal digits after '\\u', "+
"but got '%s' instead.", lx.current())
}
}
return lx.pop()
@@ -723,43 +611,40 @@ func lexLongUnicodeEscape(lx *lexer) stateFn {
for i := 0; i < 8; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf(`expected eight hexadecimal digits after '\U', `+
"but got %q instead", lx.current())
return lx.errorf("Expected eight hexadecimal digits after '\\U', "+
"but got '%s' instead.", lx.current())
}
}
return lx.pop()
}

// lexNumberOrDateStart consumes either an integer, a float, or datetime.
// lexNumberOrDateStart consumes either a (positive) integer, float or
// datetime. It assumes that NO negative sign has been consumed.
func lexNumberOrDateStart(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumberOrDate
if !isDigit(r) {
if r == '.' {
return lx.errorf("Floats must start with a digit, not '.'.")
} else {
return lx.errorf("Expected a digit but got %q.", r)
}
}
switch r {
case '_':
return lexNumber
case 'e', 'E':
return lexFloat
case '.':
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("expected a digit but got %q", r)
return lexNumberOrDate
}

// lexNumberOrDate consumes either an integer, float or datetime.
// lexNumberOrDate consumes either a (positive) integer, float or datetime.
func lexNumberOrDate(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
switch {
case r == '-':
if lx.pos-lx.start != 5 {
return lx.errorf("All ISO8601 dates must be in full Zulu form.")
}
return lexDateAfterYear
case isDigit(r):
return lexNumberOrDate
}
switch r {
case '-':
return lexDatetime
case '_':
return lexNumber
case '.', 'e', 'E':
return lexFloat
case r == '.':
return lexFloatStart
}

lx.backup()
@@ -767,34 +652,46 @@ func lexNumberOrDate(lx *lexer) stateFn {
return lx.pop()
}

// lexDatetime consumes a Datetime, to a first approximation.
// The parser validates that it matches one of the accepted formats.
func lexDatetime(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexDatetime
// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format.
// It assumes that "YYYY-" has already been consumed.
func lexDateAfterYear(lx *lexer) stateFn {
formats := []rune{
// digits are '0'.
// everything else is direct equality.
'0', '0', '-', '0', '0',
'T',
'0', '0', ':', '0', '0', ':', '0', '0',
'Z',
}
switch r {
case '-', 'T', ':', '.', 'Z', '+':
return lexDatetime
for _, f := range formats {
r := lx.next()
if f == '0' {
if !isDigit(r) {
return lx.errorf("Expected digit in ISO8601 datetime, "+
"but found %q instead.", r)
}
} else if f != r {
return lx.errorf("Expected %q in ISO8601 datetime, "+
"but found %q instead.", f, r)
}
}

lx.backup()
lx.emit(itemDatetime)
return lx.pop()
}

// lexNumberStart consumes either an integer or a float. It assumes that a sign
// has already been read, but that *no* digits have been consumed.
// lexNumberStart will move to the appropriate integer or float states.
// lexNumberStart consumes either an integer or a float. It assumes that
// a negative sign has already been read, but that *no* digits have been
// consumed. lexNumberStart will move to the appropriate integer or float
// states.
func lexNumberStart(lx *lexer) stateFn {
// We MUST see a digit. Even floats have to start with a digit.
// we MUST see a digit. Even floats have to start with a digit.
r := lx.next()
if !isDigit(r) {
if r == '.' {
return lx.errorf("floats must start with a digit, not '.'")
return lx.errorf("Floats must start with a digit, not '.'.")
} else {
return lx.errorf("Expected a digit but got %q.", r)
}
return lx.errorf("expected a digit but got %q", r)
}
return lexNumber
}
@@ -802,14 +699,11 @@ func lexNumberStart(lx *lexer) stateFn {
// lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
switch {
case isDigit(r):
return lexNumber
}
switch r {
case '_':
return lexNumber
case '.', 'e', 'E':
return lexFloat
case r == '.':
return lexFloatStart
}

lx.backup()
@@ -817,42 +711,60 @@ func lexNumber(lx *lexer) stateFn {
return lx.pop()
}

// lexFloat consumes the elements of a float. It allows any sequence of
// float-like characters, so floats emitted by the lexer are only a first
// approximation and must be validated by the parser.
// lexFloatStart starts the consumption of digits of a float after a '.'.
// Namely, at least one digit is required.
func lexFloatStart(lx *lexer) stateFn {
r := lx.next()
if !isDigit(r) {
return lx.errorf("Floats must have a digit after the '.', but got "+
"%q instead.", r)
}
return lexFloat
}

// lexFloat consumes the digits of a float after a '.'.
// Assumes that one digit has been consumed after a '.' already.
func lexFloat(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexFloat
}
switch r {
case '_', '.', '-', '+', 'e', 'E':
return lexFloat
}

lx.backup()
lx.emit(itemFloat)
return lx.pop()
}

// lexBool consumes a bool string: 'true' or 'false.
func lexBool(lx *lexer) stateFn {
var rs []rune
for {
r := lx.next()
if !unicode.IsLetter(r) {
lx.backup()
break
// lexConst consumes the s[1:] in s. It assumes that s[0] has already been
// consumed.
func lexConst(lx *lexer, s string) stateFn {
for i := range s[1:] {
if r := lx.next(); r != rune(s[i+1]) {
return lx.errorf("Expected %q, but found %q instead.", s[:i+1],
s[:i]+string(r))
}
rs = append(rs, r)
}
s := string(rs)
switch s {
case "true", "false":
lx.emit(itemBool)
return lx.pop()
return nil
}

// lexTrue consumes the "rue" in "true". It assumes that 't' has already
// been consumed.
func lexTrue(lx *lexer) stateFn {
if fn := lexConst(lx, "true"); fn != nil {
return fn
}
return lx.errorf("expected value but found %q instead", s)
lx.emit(itemBool)
return lx.pop()
}

// lexFalse consumes the "alse" in "false". It assumes that 'f' has already
// been consumed.
func lexFalse(lx *lexer) stateFn {
if fn := lexConst(lx, "false"); fn != nil {
return fn
}
lx.emit(itemBool)
return lx.pop()
}

// lexCommentStart begins the lexing of a comment. It will emit
@@ -864,7 +776,7 @@ func lexCommentStart(lx *lexer) stateFn {
}

// lexComment lexes an entire comment. It assumes that '#' has been consumed.
// It will consume *up to* the first newline character, and pass control
// It will consume *up to* the first new line character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
r := lx.peek()
@@ -922,7 +834,13 @@ func (itype itemType) String() string {
return "EOF"
case itemText:
return "Text"
case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
case itemString:
return "String"
case itemRawString:
return "String"
case itemMultilineString:
return "String"
case itemRawMultilineString:
return "String"
case itemBool:
return "Bool"
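Both versions of lex.go above are built on the same state-function design: `type stateFn func(lx *lexer) stateFn`, where each state consumes a little input and returns the next state, and a stack (`lx.push`/`lx.pop`) lets states such as lexValue be reused from several contexts. Here is a minimal, self-contained sketch of that pattern; it lexes digit runs rather than TOML, and every name in it is invented for illustration.

```go
package main

import (
	"fmt"
	"unicode"
)

// stateFn mirrors lex.go's design: each state consumes some input and
// returns the next state; nil ends the loop.
type stateFn func(*lexer) stateFn

type lexer struct {
	input string
	pos   int
	items []string // emitted tokens (digit runs, in this sketch)
}

func lexTop(l *lexer) stateFn {
	if l.pos >= len(l.input) {
		return nil // EOF
	}
	if unicode.IsDigit(rune(l.input[l.pos])) {
		return lexNumber
	}
	l.pos++ // skip anything that is not a digit
	return lexTop
}

func lexNumber(l *lexer) stateFn {
	start := l.pos
	for l.pos < len(l.input) && unicode.IsDigit(rune(l.input[l.pos])) {
		l.pos++
	}
	l.items = append(l.items, l.input[start:l.pos])
	return lexTop
}

func main() {
	l := &lexer{input: "a1b23c456"}
	for state := stateFn(lexTop); state != nil; state = state(l) {
	}
	fmt.Println(l.items) // [1 23 456]
}
```

The real lexer layers a stack of pending states on top of this loop, so that, for example, the comment state can return control to whichever state pushed it.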
133 changes: vendor/github.com/BurntSushi/toml/parse.go (generated, vendored)
@@ -2,6 +2,7 @@ package toml

import (
"fmt"
"log"
"strconv"
"strings"
"time"
@@ -80,7 +81,7 @@ func (p *parser) next() item {
}

func (p *parser) bug(format string, v ...interface{}) {
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
log.Panicf("BUG: %s\n\n", fmt.Sprintf(format, v...))
}

func (p *parser) expect(typ itemType) item {
@@ -178,18 +179,10 @@ func (p *parser) value(it item) (interface{}, tomlType) {
}
p.bug("Expected boolean value, but got '%s'.", it.val)
case itemInteger:
if !numUnderscoresOK(it.val) {
p.panicf("Invalid integer %q: underscores must be surrounded by digits",
it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseInt(val, 10, 64)
num, err := strconv.ParseInt(it.val, 10, 64)
if err != nil {
// Distinguish integer values. Normally, it'd be a bug if the lexer
// provides an invalid integer, but it's possible that the number is
// out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user
// error.
// See comment below for floats describing why we make a
// distinction between a bug and a user error.
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {

@@ -201,57 +194,29 @@ func (p *parser) value(it item) (interface{}, tomlType) {
}
return num, p.typeOfPrimitive(it)
case itemFloat:
parts := strings.FieldsFunc(it.val, func(r rune) bool {
switch r {
case '.', 'e', 'E':
return true
}
return false
})
for _, part := range parts {
if !numUnderscoresOK(part) {
p.panicf("Invalid float %q: underscores must be "+
"surrounded by digits", it.val)
}
}
if !numPeriodsOK(it.val) {
// As a special case, numbers like '123.' or '1.e2',
// which are valid as far as Go/strconv are concerned,
// must be rejected because TOML says that a fractional
// part consists of '.' followed by 1+ digits.
p.panicf("Invalid float %q: '.' must be followed "+
"by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseFloat(val, 64)
num, err := strconv.ParseFloat(it.val, 64)
if err != nil {
// Distinguish float values. Normally, it'd be a bug if the lexer
// provides an invalid float, but it's possible that the float is
// out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user
// error.
//
// This is also true for integers.
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {

p.panicf("Float '%s' is out of the range of 64-bit "+
"IEEE-754 floating-point numbers.", it.val)
} else {
p.panicf("Invalid float value: %q", it.val)
p.bug("Expected float value, but got '%s'.", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemDatetime:
var t time.Time
var ok bool
var err error
for _, format := range []string{
"2006-01-02T15:04:05Z07:00",
"2006-01-02T15:04:05",
"2006-01-02",
} {
t, err = time.ParseInLocation(format, it.val, time.Local)
if err == nil {
ok = true
break
}
}
if !ok {
p.panicf("Invalid TOML Datetime: %q.", it.val)
t, err := time.Parse("2006-01-02T15:04:05Z", it.val)
if err != nil {
p.panicf("Invalid RFC3339 Zulu DateTime: '%s'.", it.val)
}
return t, p.typeOfPrimitive(it)
case itemArray:
@@ -269,75 +234,11 @@ func (p *parser) value(it item) (interface{}, tomlType) {
types = append(types, typ)
}
return array, p.typeOfArray(types)
case itemInlineTableStart:
var (
hash = make(map[string]interface{})
outerContext = p.context
outerKey = p.currentKey
)

p.context = append(p.context, p.currentKey)
p.currentKey = ""
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
if it.typ != itemKeyStart {
p.bug("Expected key start but instead found %q, around line %d",
it.val, p.approxLine)
}
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}

// retrieve key
k := p.next()
p.approxLine = k.line
kname := p.keyString(k)

// retrieve value
p.currentKey = kname
val, typ := p.value(p.next())
// make sure we keep metadata up to date
p.setType(kname, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
hash[kname] = val
}
p.context = outerContext
p.currentKey = outerKey
return hash, tomlHash
}
p.bug("Unexpected value type: %s", it.typ)
panic("unreachable")
}

// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores.
func numUnderscoresOK(s string) bool {
accept := false
for _, r := range s {
if r == '_' {
if !accept {
return false
}
accept = false
continue
}
accept = true
}
return accept
}

// numPeriodsOK checks whether every period in s is followed by a digit.
func numPeriodsOK(s string) bool {
period := false
for _, r := range s {
if period && !isDigit(r) {
return false
}
period = r == '.'
}
return !period
}

// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
9 changes: vendor/github.com/BurntSushi/toml/type_fields.go (generated, vendored)
@@ -95,8 +95,8 @@ func typeFields(t reflect.Type) []field {
if sf.PkgPath != "" && !sf.Anonymous { // unexported
continue
}
opts := getOptions(sf.Tag)
if opts.skip {
name, _ := getOptions(sf.Tag.Get("toml"))
if name == "-" {
continue
}
index := make([]int, len(f.index)+1)
@@ -110,9 +110,8 @@ func typeFields(t reflect.Type) []field {
}

// Record found field and index sequence.
if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := opts.name != ""
name := opts.name
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := name != ""
if name == "" {
name = sf.Name
}
@@ -1,6 +1,6 @@
The MIT License (MIT)
The MIT License

Copyright (c) 2016 Yasuhiro Matsumoto
Copyright (c) 2013 VividCortex

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
@@ -9,13 +9,13 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
140 changes: vendor/github.com/VividCortex/ewma/README.md (generated, vendored, new file)
@@ -0,0 +1,140 @@
# EWMA [GoDoc](https://godoc.org/github.com/VividCortex/ewma)

This repo provides Exponentially Weighted Moving Average algorithms, or EWMAs for short, [based on our
Quantifying Abnormal Behavior talk](https://vividcortex.com/blog/2013/07/23/a-fast-go-library-for-exponential-moving-averages/).

### Exponentially Weighted Moving Average

An exponentially weighted moving average is a way to continuously compute a type of
average for a series of numbers, as the numbers arrive. After a value in the series is
added to the average, its weight in the average decreases exponentially over time. This
biases the average towards more recent data. EWMAs are useful for several reasons, chiefly
their inexpensive computational and memory cost, as well as the fact that they represent
the recent central tendency of the series of values.

The EWMA algorithm requires a decay factor, alpha. The larger the alpha, the more the average
is biased towards recent history. The alpha must be between 0 and 1, and is typically
a fairly small number, such as 0.04. We will discuss the choice of alpha later.

The algorithm works thus, in pseudocode (a minimal Go sketch follows the list):

1. Multiply the next number in the series by alpha.
2. Multiply the current value of the average by 1 minus alpha.
3. Add the result of steps 1 and 2, and store it as the new current value of the average.
4. Repeat for each number in the series.
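Transcribed directly into Go, those four steps are just a few lines. This is a sketch for illustration only (the function name is invented); the package's real implementations appear in ewma.go further down.

```go
package main

import "fmt"

// ewmaOf applies the four pseudocode steps above, seeding the average with
// the first value (one of the initialization strategies discussed next).
func ewmaOf(series []float64, alpha float64) float64 {
	if len(series) == 0 {
		return 0
	}
	avg := series[0]
	for _, x := range series[1:] {
		avg = x*alpha + avg*(1-alpha) // steps 1-3, repeated per step 4
	}
	return avg
}

func main() {
	fmt.Println(ewmaOf([]float64{300, 280, 320, 310, 290}, 0.5)) // 298.75
}
```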
There are special-case behaviors for how to initialize the current value, and these vary
between implementations. One approach is to start with the first value in the series;
another is to average the first 10 or so values in the series using an arithmetic average,
and then begin the incremental updating of the average. Each method has pros and cons.

It may help to look at it pictorially. Suppose the series has five numbers, and we choose
alpha to be 0.50 for simplicity. Here's the series, with numbers in the neighborhood of 300.

*(image: the original series; not captured in this mirror)*

Now let's take the moving average of those numbers. First we set the average to the value
of the first number.

*(image: the average seeded with the first value; not captured in this mirror)*

Next we multiply the next number by alpha, multiply the current value by 1-alpha, and add
them to generate a new value.

*(image: the second step of the average; not captured in this mirror)*

This continues until we are done.

*(image: the completed moving average; not captured in this mirror)*

Notice how each of the values in the series decays by half each time a new value
is added, and the top of the bars in the lower portion of the image represents the
size of the moving average. It is a smoothed, or low-pass, average of the original
series.

For further reading, see [Exponentially weighted moving average](http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average) on wikipedia.

### Choosing Alpha

Consider a fixed-size sliding-window moving average (not an exponentially weighted moving average)
that averages over the previous N samples. What is the average age of each sample? It is N/2.

Now suppose that you wish to construct a EWMA whose samples have the same average age. The formula
to compute the alpha required for this is: alpha = 2/(N+1). Proof is in the book
"Production and Operations Analysis" by Steven Nahmias.

So, for example, if you have a time-series with samples once per second, and you want to get the
moving average over the previous minute, you should use an alpha of .032786885. This, by the way,
is the constant alpha used for this repository's SimpleEWMA.
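That formula is easy to check in code; the helper below is invented purely for illustration.

```go
package main

import "fmt"

// alphaForAge returns the decay factor whose samples have the same average
// age as an N-sample sliding window: alpha = 2/(N+1).
func alphaForAge(n float64) float64 { return 2 / (n + 1) }

func main() {
	// 60 one-second samples -> 0.03278688524590164, the .032786885 quoted above.
	fmt.Println(alphaForAge(60))
}
```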
### Implementations

This repository contains two implementations of the EWMA algorithm, with different properties.

The implementations all conform to the MovingAverage interface, and the constructor returns
that type.

Current implementations assume an implicit time interval of 1.0 between every sample added.
That is, the passage of time is treated as though it's the same as the arrival of samples.
If you need time-based decay when samples are not arriving precisely at set intervals, then
this package will not support your needs at present.

#### SimpleEWMA

A SimpleEWMA is designed for low CPU and memory consumption. It **will** have different behavior than the VariableEWMA
for multiple reasons. It has no warm-up period and it uses a constant
decay. These properties let it use less memory. It will also behave
differently when it's equal to zero, which is assumed to mean
uninitialized, so if a value is likely to actually become zero over time,
then any non-zero value will cause a sharp jump instead of a small change.
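One way around the zero-means-uninitialized caveat is to seed the average explicitly with `Set` before adding samples. A sketch against the API defined in ewma.go below (the seed value is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/VividCortex/ewma"
)

func main() {
	var e ewma.SimpleEWMA
	// Seed explicitly so a later legitimate zero sample is not mistaken
	// for an uninitialized average.
	e.Set(0.0001)
	e.Add(0) // decays toward zero instead of re-seeding from scratch
	fmt.Println(e.Value())
}
```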
#### VariableEWMA

Unlike SimpleEWMA, this supports a custom age which must be stored, and thus uses more memory.
It also has a "warmup" time when you start adding values to it. It will report a value of 0.0
until you have added the required number of samples to it. It uses some memory to store the
number of samples added to it. As a result it uses a little over twice the memory of SimpleEWMA.
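The warmup is easy to observe: with WARMUP_SAMPLES fixed at 10 in ewma.go below, `Value` stays 0.0 until an eleventh sample has been added. A small sketch:

```go
package main

import (
	"fmt"

	"github.com/VividCortex/ewma"
)

func main() {
	a := ewma.NewMovingAverage(15) // a VariableEWMA with decay 2/(15+1)
	for i := 0; i < 10; i++ {
		a.Add(100)
	}
	fmt.Println(a.Value()) // 0: still warming up
	a.Add(100)
	fmt.Println(a.Value()) // 100: warmup complete, average reported
}
```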
## Usage

### API Documentation

View the GoDoc generated documentation [here](http://godoc.org/github.com/VividCortex/ewma).

```go
package main
import "github.com/VividCortex/ewma"

func main() {
	samples := [100]float64{
		4599, 5711, 4746, 4621, 5037, 4218, 4925, 4281, 5207, 5203, 5594, 5149,
	}

	e := ewma.NewMovingAverage()  //=> Returns a SimpleEWMA if called without params
	a := ewma.NewMovingAverage(5) //=> returns a VariableEWMA with a decay of 2 / (5 + 1)

	for _, f := range samples {
		e.Add(f)
		a.Add(f)
	}

	e.Value() //=> 13.577404704631077
	a.Value() //=> 1.5806140565521463e-12
}
```

## Contributing

We only accept pull requests for minor fixes or improvements. This includes:

* Small bug fixes
* Typos
* Documentation or comments

Please open issues to discuss new features. Pull requests for new features will be rejected,
so we recommend forking the repository and making changes in your fork for your use case.

## License

This repository is Copyright (c) 2013 VividCortex, Inc. All rights reserved.
It is licensed under the MIT license. Please see the LICENSE file for applicable license terms.
126 changes: vendor/github.com/VividCortex/ewma/ewma.go (generated, vendored, new file)
@@ -0,0 +1,126 @@
// Package ewma implements exponentially weighted moving averages.
package ewma

// Copyright (c) 2013 VividCortex, Inc. All rights reserved.
// Please see the LICENSE file for applicable license terms.

const (
	// By default, we average over a one-minute period, which means the average
	// age of the metrics in the period is 30 seconds.
	AVG_METRIC_AGE float64 = 30.0

	// The formula for computing the decay factor from the average age comes
	// from "Production and Operations Analysis" by Steven Nahmias.
	DECAY float64 = 2 / (float64(AVG_METRIC_AGE) + 1)

	// For best results, the moving average should not be initialized to the
	// samples it sees immediately. The book "Production and Operations
	// Analysis" by Steven Nahmias suggests initializing the moving average to
	// the mean of the first 10 samples. Until the VariableEwma has seen this
	// many samples, it is not "ready" to be queried for the value of the
	// moving average. This adds some memory cost.
	WARMUP_SAMPLES uint8 = 10
)

// MovingAverage is the interface that computes a moving average over a time-
// series stream of numbers. The average may be over a window or exponentially
// decaying.
type MovingAverage interface {
	Add(float64)
	Value() float64
	Set(float64)
}

// NewMovingAverage constructs a MovingAverage that computes an average with the
// desired characteristics in the moving window or exponential decay. If no
// age is given, it constructs a default exponentially weighted implementation
// that consumes minimal memory. The age is related to the decay factor alpha
// by the formula given for the DECAY constant. It signifies the average age
// of the samples as time goes to infinity.
func NewMovingAverage(age ...float64) MovingAverage {
	if len(age) == 0 || age[0] == AVG_METRIC_AGE {
		return new(SimpleEWMA)
	}
	return &VariableEWMA{
		decay: 2 / (age[0] + 1),
	}
}

// A SimpleEWMA represents the exponentially weighted moving average of a
// series of numbers. It WILL have different behavior than the VariableEWMA
// for multiple reasons. It has no warm-up period and it uses a constant
// decay. These properties let it use less memory. It will also behave
// differently when it's equal to zero, which is assumed to mean
// uninitialized, so if a value is likely to actually become zero over time,
// then any non-zero value will cause a sharp jump instead of a small change.
// However, note that this takes a long time, and the value may just
// decays to a stable value that's close to zero, but which won't be mistaken
// for uninitialized. See http://play.golang.org/p/litxBDr_RC for example.
type SimpleEWMA struct {
	// The current value of the average. After adding with Add(), this is
	// updated to reflect the average of all values seen thus far.
	value float64
}

// Add adds a value to the series and updates the moving average.
func (e *SimpleEWMA) Add(value float64) {
	if e.value == 0 { // this is a proxy for "uninitialized"
		e.value = value
	} else {
		e.value = (value * DECAY) + (e.value * (1 - DECAY))
	}
}

// Value returns the current value of the moving average.
func (e *SimpleEWMA) Value() float64 {
	return e.value
}

// Set sets the EWMA's value.
func (e *SimpleEWMA) Set(value float64) {
	e.value = value
}

// VariableEWMA represents the exponentially weighted moving average of a series of
// numbers. Unlike SimpleEWMA, it supports a custom age, and thus uses more memory.
type VariableEWMA struct {
	// The multiplier factor by which the previous samples decay.
	decay float64
	// The current value of the average.
	value float64
	// The number of samples added to this instance.
	count uint8
}

// Add adds a value to the series and updates the moving average.
func (e *VariableEWMA) Add(value float64) {
	switch {
	case e.count < WARMUP_SAMPLES:
		e.count++
		e.value += value
	case e.count == WARMUP_SAMPLES:
		e.count++
		e.value = e.value / float64(WARMUP_SAMPLES)
		e.value = (value * e.decay) + (e.value * (1 - e.decay))
	default:
		e.value = (value * e.decay) + (e.value * (1 - e.decay))
	}
}

// Value returns the current value of the average, or 0.0 if the series hasn't
// warmed up yet.
func (e *VariableEWMA) Value() float64 {
	if e.count <= WARMUP_SAMPLES {
		return 0.0
	}

	return e.value
}

// Set sets the EWMA's value.
func (e *VariableEWMA) Set(value float64) {
	e.value = value
	if e.count <= WARMUP_SAMPLES {
		e.count = WARMUP_SAMPLES + 1
	}
}
20 changes: vendor/github.com/beorn7/perks/LICENSE (generated, vendored)
@@ -1,20 +0,0 @@
Copyright (C) 2013 Blake Mizerany

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
292 changes: vendor/github.com/beorn7/perks/quantile/stream.go (generated, vendored)
@@ -1,292 +0,0 @@
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile

import (
	"math"
	"sort"
)

// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
	Value float64 `json:",string"`
	Width float64 `json:",string"`
	Delta float64 `json:",string"`
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

type invariant func(s *stream, r float64) float64

// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * r
	}
	return newStream(ƒ)
}

// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * (s.n - r)
	}
	return newStream(ƒ)
}

// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targets map[float64]float64) *Stream {
	ƒ := func(s *stream, r float64) float64 {
		var m = math.MaxFloat64
		var f float64
		for quantile, epsilon := range targets {
|
||||
if quantile*s.n <= r {
|
||||
f = (2 * epsilon * r) / quantile
|
||||
} else {
|
||||
f = (2 * epsilon * (s.n - r)) / (1 - quantile)
|
||||
}
|
||||
if f < m {
|
||||
m = f
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
|
||||
// design. Take care when using across multiple goroutines.
|
||||
type Stream struct {
|
||||
*stream
|
||||
b Samples
|
||||
sorted bool
|
||||
}
|
||||
|
||||
func newStream(ƒ invariant) *Stream {
|
||||
x := &stream{ƒ: ƒ}
|
||||
return &Stream{x, make(Samples, 0, 500), true}
|
||||
}
|
||||
|
||||
// Insert inserts v into the stream.
|
||||
func (s *Stream) Insert(v float64) {
|
||||
s.insert(Sample{Value: v, Width: 1})
|
||||
}
|
||||
|
||||
func (s *Stream) insert(sample Sample) {
|
||||
s.b = append(s.b, sample)
|
||||
s.sorted = false
|
||||
if len(s.b) == cap(s.b) {
|
||||
s.flush()
|
||||
}
|
||||
}
|
||||
|
||||
// Query returns the computed qth percentiles value. If s was created with
|
||||
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
|
||||
// will return an unspecified result.
|
||||
func (s *Stream) Query(q float64) float64 {
|
||||
if !s.flushed() {
|
||||
// Fast path when there hasn't been enough data for a flush;
|
||||
// this also yields better accuracy for small sets of data.
|
||||
l := len(s.b)
|
||||
if l == 0 {
|
||||
return 0
|
||||
}
|
||||
i := int(math.Ceil(float64(l) * q))
|
||||
if i > 0 {
|
||||
i -= 1
|
||||
}
|
||||
s.maybeSort()
|
||||
return s.b[i].Value
|
||||
}
|
||||
s.flush()
|
||||
return s.stream.query(q)
|
||||
}
|
||||
|
||||
// Merge merges samples into the underlying streams samples. This is handy when
|
||||
// merging multiple streams from separate threads, database shards, etc.
|
||||
//
|
||||
// ATTENTION: This method is broken and does not yield correct results. The
|
||||
// underlying algorithm is not capable of merging streams correctly.
|
||||
func (s *Stream) Merge(samples Samples) {
|
||||
sort.Sort(samples)
|
||||
s.stream.merge(samples)
|
||||
}
|
||||
|
||||
// Reset reinitializes and clears the list reusing the samples buffer memory.
|
||||
func (s *Stream) Reset() {
|
||||
s.stream.reset()
|
||||
s.b = s.b[:0]
|
||||
}
|
||||
|
||||
// Samples returns stream samples held by s.
|
||||
func (s *Stream) Samples() Samples {
|
||||
if !s.flushed() {
|
||||
return s.b
|
||||
}
|
||||
s.flush()
|
||||
return s.stream.samples()
|
||||
}
|
||||
|
||||
// Count returns the total number of samples observed in the stream
|
||||
// since initialization.
|
||||
func (s *Stream) Count() int {
|
||||
return len(s.b) + s.stream.count()
|
||||
}
|
||||
|
||||
func (s *Stream) flush() {
|
||||
s.maybeSort()
|
||||
s.stream.merge(s.b)
|
||||
s.b = s.b[:0]
|
||||
}
|
||||
|
||||
func (s *Stream) maybeSort() {
|
||||
if !s.sorted {
|
||||
s.sorted = true
|
||||
sort.Sort(s.b)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Stream) flushed() bool {
|
||||
return len(s.stream.l) > 0
|
||||
}
|
||||
|
||||
type stream struct {
|
||||
n float64
|
||||
l []Sample
|
||||
ƒ invariant
|
||||
}
|
||||
|
||||
func (s *stream) reset() {
|
||||
s.l = s.l[:0]
|
||||
s.n = 0
|
||||
}
|
||||
|
||||
func (s *stream) insert(v float64) {
|
||||
s.merge(Samples{{v, 1, 0}})
|
||||
}
|
||||
|
||||
func (s *stream) merge(samples Samples) {
|
||||
// TODO(beorn7): This tries to merge not only individual samples, but
|
||||
// whole summaries. The paper doesn't mention merging summaries at
|
||||
// all. Unittests show that the merging is inaccurate. Find out how to
|
||||
// do merges properly.
|
||||
var r float64
|
||||
i := 0
|
||||
for _, sample := range samples {
|
||||
for ; i < len(s.l); i++ {
|
||||
c := s.l[i]
|
||||
if c.Value > sample.Value {
|
||||
// Insert at position i.
|
||||
s.l = append(s.l, Sample{})
|
||||
copy(s.l[i+1:], s.l[i:])
|
||||
s.l[i] = Sample{
|
||||
sample.Value,
|
||||
sample.Width,
|
||||
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
|
||||
// TODO(beorn7): How to calculate delta correctly?
|
||||
}
|
||||
i++
|
||||
goto inserted
|
||||
}
|
||||
r += c.Width
|
||||
}
|
||||
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
|
||||
i++
|
||||
inserted:
|
||||
s.n += sample.Width
|
||||
r += sample.Width
|
||||
}
|
||||
s.compress()
|
||||
}
|
||||
|
||||
func (s *stream) count() int {
|
||||
return int(s.n)
|
||||
}
|
||||
|
||||
func (s *stream) query(q float64) float64 {
|
||||
t := math.Ceil(q * s.n)
|
||||
t += math.Ceil(s.ƒ(s, t) / 2)
|
||||
p := s.l[0]
|
||||
var r float64
|
||||
for _, c := range s.l[1:] {
|
||||
r += p.Width
|
||||
if r+c.Width+c.Delta > t {
|
||||
return p.Value
|
||||
}
|
||||
p = c
|
||||
}
|
||||
return p.Value
|
||||
}
|
||||
|
||||
func (s *stream) compress() {
|
||||
if len(s.l) < 2 {
|
||||
return
|
||||
}
|
||||
x := s.l[len(s.l)-1]
|
||||
xi := len(s.l) - 1
|
||||
r := s.n - 1 - x.Width
|
||||
|
||||
for i := len(s.l) - 2; i >= 0; i-- {
|
||||
c := s.l[i]
|
||||
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
|
||||
x.Width += c.Width
|
||||
s.l[xi] = x
|
||||
// Remove element at i.
|
||||
copy(s.l[i:], s.l[i+1:])
|
||||
s.l = s.l[:len(s.l)-1]
|
||||
xi -= 1
|
||||
} else {
|
||||
x = c
|
||||
xi = i
|
||||
}
|
||||
r -= c.Width
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stream) samples() Samples {
|
||||
samples := make(Samples, len(s.l))
|
||||
copy(samples, s.l)
|
||||
return samples
|
||||
}
|
||||
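A short usage sketch of the targeted-quantile API in the (now removed) file above; it is illustrative only and relies solely on NewTargeted, Insert, and Query as defined there:

package main

import (
    "fmt"

    "github.com/beorn7/perks/quantile"
)

func main() {
    // Track the median and the 99th percentile, each with its own absolute error.
    q := quantile.NewTargeted(map[float64]float64{0.5: 0.05, 0.99: 0.001})
    for i := 1; i <= 10000; i++ {
        q.Insert(float64(i))
    }
    fmt.Println("p50 ~", q.Query(0.5))  // roughly 5000
    fmt.Println("p99 ~", q.Query(0.99)) // roughly 9900
}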
19 vendor/github.com/boltdb/bolt/README.md generated vendored
@@ -21,25 +21,6 @@ consistency and thread safety. Bolt is currently used in high-load production
environments serving databases as large as 1TB. Many companies such as
Shopify and Heroku use Bolt-backed services every day.

## A message from the author

> The original goal of Bolt was to provide a simple pure Go key/value store and to
> not bloat the code with extraneous features. To that end, the project has been
> a success. However, this limited scope also means that the project is complete.
>
> Maintaining an open source database requires an immense amount of time and energy.
> Changes to the code can have unintended and sometimes catastrophic effects so
> even simple changes require hours and hours of careful testing and validation.
>
> Unfortunately I no longer have the time or energy to continue this work. Bolt is
> in a stable state and has years of successful production use. As such, I feel that
> leaving it in its current state is the most prudent course of action.
>
> If you are interested in using a more featureful version of Bolt, I suggest that
> you look at the CoreOS fork called [bbolt](https://github.com/coreos/bbolt).

- Ben Johnson ([@benbjohnson](https://twitter.com/benbjohnson))

## Table of Contents

- [Getting Started](#getting-started)
4 vendor/github.com/boltdb/bolt/db.go generated vendored
@@ -737,7 +737,9 @@ retry:

        // pass success, or bolt internal errors, to all callers
        for _, c := range b.calls {
            c.err <- err
            if c.err != nil {
                c.err <- err
            }
        }
        break retry
    }
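The added nil check matters because a send on a nil channel blocks forever in Go, so a caller that never registered an error channel would otherwise wedge the batch loop. A self-contained sketch of the hazard (illustrative only, unrelated to Bolt's types):

package main

import "fmt"

func main() {
    var ch chan error // nil channel: a send on it would block forever
    select {
    case ch <- nil:
        fmt.Println("unreachable: a nil-channel send is never ready")
    default:
        fmt.Println("send would block; hence the `if c.err != nil` guard above")
    }
}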
4 vendor/github.com/boltdb/bolt/tx.go generated vendored
@@ -291,9 +291,7 @@ func (tx *Tx) close() {
}

// Copy writes the entire database to a writer.
// This function exists for backwards compatibility.
//
// Deprecated; Use WriteTo() instead.
// This function exists for backwards compatibility. Use WriteTo() instead.
func (tx *Tx) Copy(w io.Writer) error {
    _, err := tx.WriteTo(w)
    return err
200 vendor/github.com/containers/image/copy/copy.go generated vendored
@@ -6,24 +6,29 @@ import (
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "reflect"
    "runtime"
    "strings"
    "sync"
    "time"

    "github.com/containers/image/docker/reference"
    "github.com/containers/image/image"
    "github.com/containers/image/manifest"
    "github.com/containers/image/pkg/blobinfocache"
    "github.com/containers/image/pkg/compression"
    "github.com/containers/image/signature"
    "github.com/containers/image/transports"
    "github.com/containers/image/types"
    "github.com/klauspost/pgzip"
    "github.com/opencontainers/go-digest"
    digest "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    "github.com/vbauerster/mpb"
    "github.com/vbauerster/mpb/decor"
    "golang.org/x/crypto/ssh/terminal"
    "golang.org/x/sync/semaphore"
    pb "gopkg.in/cheggaaa/pb.v1"
)

type digestingReader struct {
@@ -84,6 +89,7 @@ type copier struct {
    dest             types.ImageDestination
    rawSource        types.ImageSource
    reportWriter     io.Writer
    progressOutput   io.Writer
    progressInterval time.Duration
    progress         chan types.ProgressProperties
    blobInfoCache    types.BlobInfoCache
@@ -152,11 +158,19 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
        }
    }()

    // If reportWriter is not a TTY (e.g., when piping to a file), do not
    // print the progress bars to avoid long and hard to parse output.
    // createProgressBar() will print a single line instead.
    progressOutput := reportWriter
    if !isTTY(reportWriter) {
        progressOutput = ioutil.Discard
    }
    copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob()
    c := &copier{
        dest:             dest,
        rawSource:        rawSource,
        reportWriter:     reportWriter,
        progressOutput:   progressOutput,
        progressInterval: options.ProgressInterval,
        progress:         options.Progress,
        copyInParallel:   copyInParallel,
@@ -201,7 +215,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,

// Image copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
// source image admissibility.
func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (manifest []byte, retErr error) {
func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (manifestBytes []byte, retErr error) {
    // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
    // Make sure we fail cleanly in such cases.
    multiImage, err := isMultiImage(ctx, unparsedImage)
@@ -224,6 +238,26 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
        return nil, errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
    }

    // If the destination is a digested reference, make a note of that, determine what digest value we're
    // expecting, and check that the source manifest matches it.
    destIsDigestedReference := false
    if named := c.dest.Reference().DockerReference(); named != nil {
        if digested, ok := named.(reference.Digested); ok {
            destIsDigestedReference = true
            sourceManifest, _, err := src.Manifest(ctx)
            if err != nil {
                return nil, errors.Wrapf(err, "Error reading manifest from source image")
            }
            matches, err := manifest.MatchesDigest(sourceManifest, digested.Digest())
            if err != nil {
                return nil, errors.Wrapf(err, "Error computing digest of source image's manifest")
            }
            if !matches {
                return nil, errors.New("Digest of source image's manifest would not match destination reference")
            }
        }
    }

    if err := checkImageDestinationForCurrentRuntimeOS(ctx, options.DestinationCtx, src, c.dest); err != nil {
        return nil, err
    }
@@ -251,15 +285,15 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
        manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
        src:             src,
        // diffIDsAreNeeded is computed later
        canModifyManifest: len(sigs) == 0,
        // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
        // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
        // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
        // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
        // that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
        // and we would reuse and sign it.
        canSubstituteBlobs: len(sigs) == 0 && options.SignBy == "",
        canModifyManifest: len(sigs) == 0 && !destIsDigestedReference,
    }
    // Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
    // This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
    // The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
    // We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
    // that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
    // and we would reuse and sign it.
    ic.canSubstituteBlobs = ic.canModifyManifest && options.SignBy == ""

    if err := ic.updateEmbeddedDockerReference(); err != nil {
        return nil, err
@@ -283,7 +317,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
    // and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
    // without actually trying to upload something and getting a types.ManifestTypeRejectedError.
    // So, try the preferred manifest MIME type. If the process succeeds, fine…
    manifest, err = ic.copyUpdatedConfigAndManifest(ctx)
    manifestBytes, err = ic.copyUpdatedConfigAndManifest(ctx)
    if err != nil {
        logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
        // … if it fails, _and_ the failure is because the manifest is rejected, we may have other options.
@@ -314,7 +348,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
            }

            // We have successfully uploaded a manifest.
            manifest = attemptedManifest
            manifestBytes = attemptedManifest
            errs = nil // Mark this as a success so that we don't abort below.
            break
        }
@@ -324,7 +358,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
    }

    if options.SignBy != "" {
        newSig, err := c.createSignature(manifest, options.SignBy)
        newSig, err := c.createSignature(manifestBytes, options.SignBy)
        if err != nil {
            return nil, err
        }
@@ -336,7 +370,7 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
        return nil, errors.Wrap(err, "Error writing signatures")
    }

    return manifest, nil
    return manifestBytes, nil
}

// Printf writes a formatted string to c.reportWriter.
@@ -389,20 +423,12 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error {
    return nil
}

// shortDigest returns the first 12 characters of the digest.
func shortDigest(d digest.Digest) string {
    return d.Encoded()[:12]
}

// createProgressBar creates a pb.ProgressBar.
func createProgressBar(srcInfo types.BlobInfo, kind string, writer io.Writer) *pb.ProgressBar {
    bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
    bar.SetMaxWidth(80)
    bar.ShowTimeLeft = false
    bar.ShowPercent = false
    bar.Prefix(fmt.Sprintf("Copying %s %s:", kind, shortDigest(srcInfo.Digest)))
    bar.Output = writer
    return bar
// isTTY returns true if the io.Writer is a file and a tty.
func isTTY(w io.Writer) bool {
    if f, ok := w.(*os.File); ok {
        return terminal.IsTerminal(int(f.Fd()))
    }
    return false
}

// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest.
@@ -431,6 +457,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
    // copyGroup is used to determine if all layers are copied
    copyGroup := sync.WaitGroup{}
    copyGroup.Add(numLayers)

    // copySemaphore is used to limit the number of parallel downloads to
    // avoid malicious images causing troubles and to be nice to servers.
    var copySemaphore *semaphore.Weighted
@@ -441,8 +468,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
    }

    data := make([]copyLayerData, numLayers)
    copyLayerHelper := func(index int, srcLayer types.BlobInfo, bar *pb.ProgressBar) {
        defer bar.Finish()
    copyLayerHelper := func(index int, srcLayer types.BlobInfo, bar *mpb.Bar) {
        defer copySemaphore.Release(1)
        defer copyGroup.Done()
        cld := copyLayerData{}
@@ -452,44 +478,37 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
            // does not support them.
            if ic.diffIDsAreNeeded {
                cld.err = errors.New("getting DiffID for foreign layers is unimplemented")
                bar.Prefix(fmt.Sprintf("Skipping blob %s (DiffID foreign layer unimplemented):", shortDigest(srcLayer.Digest)))
                bar.Finish()
            } else {
                cld.destInfo = srcLayer
                logrus.Debugf("Skipping foreign layer %q copy to %s\n", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
                bar.Prefix(fmt.Sprintf("Skipping blob %s (foreign layer):", shortDigest(srcLayer.Digest)))
                bar.Add64(bar.Total)
                bar.Finish()
                logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
            }
        } else {
            cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, bar)
        }
        data[index] = cld
        bar.SetTotal(srcLayer.Size, true)
    }

    progressBars := make([]*pb.ProgressBar, numLayers)
    for i, srcInfo := range srcInfos {
        bar := createProgressBar(srcInfo, "blob", nil)
        progressBars[i] = bar
    }
    func() { // A scope for defer
        progressPool, progressCleanup := ic.c.newProgressPool(ctx)
        defer progressCleanup()

    progressPool := pb.NewPool(progressBars...)
    progressPool.Output = ic.c.reportWriter
    if err := progressPool.Start(); err != nil {
        return errors.Wrapf(err, "error creating progress-bar pool")
    }
        progressBars := make([]*mpb.Bar, numLayers)
        for i, srcInfo := range srcInfos {
            progressBars[i] = ic.c.createProgressBar(progressPool, srcInfo, "blob")
        }

    for i, srcLayer := range srcInfos {
        copySemaphore.Acquire(ctx, 1)
        go copyLayerHelper(i, srcLayer, progressBars[i])
    }
        for i, srcLayer := range srcInfos {
            copySemaphore.Acquire(ctx, 1)
            go copyLayerHelper(i, srcLayer, progressBars[i])
        }

        // Wait for all layers to be copied
        copyGroup.Wait()
    }()

    destInfos := make([]types.BlobInfo, numLayers)
    diffIDs := make([]digest.Digest, numLayers)

    copyGroup.Wait()
    progressPool.Stop()

    for i, cld := range data {
        if cld.err != nil {
            return cld.err
@@ -560,6 +579,44 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte
    return manifest, nil
}

// newProgressPool creates a *mpb.Progress and a cleanup function.
// The caller must eventually call the returned cleanup function after the pool will no longer be updated.
func (c *copier) newProgressPool(ctx context.Context) (*mpb.Progress, func()) {
    ctx, cancel := context.WithCancel(ctx)
    pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(c.progressOutput), mpb.WithContext(ctx))
    return pool, func() {
        cancel()
        pool.Wait()
    }
}

// createProgressBar creates a mpb.Bar in pool. Note that if the copier's reportWriter
// is ioutil.Discard, the progress bar's output will be discarded
func (c *copier) createProgressBar(pool *mpb.Progress, info types.BlobInfo, kind string) *mpb.Bar {
    // shortDigestLen is the length of the digest used for blobs.
    const shortDigestLen = 12

    prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
    // Truncate the prefix (chopping off some part of the digest) to make all progress bars aligned in a column.
    maxPrefixLen := len("Copying blob ") + shortDigestLen
    if len(prefix) > maxPrefixLen {
        prefix = prefix[:maxPrefixLen]
    }

    bar := pool.AddBar(info.Size,
        mpb.PrependDecorators(
            decor.Name(prefix),
        ),
        mpb.AppendDecorators(
            decor.CountersKibiByte("%.1f / %.1f"),
        ),
    )
    if c.progressOutput == ioutil.Discard {
        c.Printf("Copying %s %s\n", kind, info.Digest)
    }
    return bar
}

// copyConfig copies config.json, if any, from src to dest.
func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
    srcInfo := src.ConfigInfo()
@@ -568,12 +625,20 @@ func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
        if err != nil {
            return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
        }
        bar := createProgressBar(srcInfo, "config", c.reportWriter)
        defer bar.Finish()
        bar.Start()
        destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)

        destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
            progressPool, progressCleanup := c.newProgressPool(ctx)
            defer progressCleanup()
            bar := c.createProgressBar(progressPool, srcInfo, "config")
            destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
            if err != nil {
                return types.BlobInfo{}, err
            }
            bar.SetTotal(int64(len(configBlob)), true)
            return destInfo, nil
        }()
        if err != nil {
            return err
        }
        if destInfo.Digest != srcInfo.Digest {
            return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
@@ -591,7 +656,7 @@ type diffIDResult struct {

// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, bar *pb.ProgressBar) (types.BlobInfo, digest.Digest, error) {
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, bar *mpb.Bar) (types.BlobInfo, digest.Digest, error) {
    cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
    diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""

@@ -602,9 +667,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba
        return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
    }
    if reused {
        bar.Prefix(fmt.Sprintf("Skipping blob %s (already present):", shortDigest(srcInfo.Digest)))
        bar.Add64(bar.Total)
        bar.Finish()
        logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
        return blobInfo, cachedDiffID, nil
    }
}
@@ -616,8 +679,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba
    }
    defer srcStream.Close()

    blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize},
        diffIDIsNeeded, bar)
    blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize}, diffIDIsNeeded, bar)
    if err != nil {
        return types.BlobInfo{}, "", err
    }
@@ -645,7 +707,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, ba
// perhaps compressing the stream if canCompress,
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
    diffIDIsNeeded bool, bar *pb.ProgressBar) (types.BlobInfo, <-chan diffIDResult, error) {
    diffIDIsNeeded bool, bar *mpb.Bar) (types.BlobInfo, <-chan diffIDResult, error) {
    var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
    var diffIDChan chan diffIDResult

@@ -706,7 +768,7 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
// and returns a complete blobInfo of the copied blob.
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
    getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
    canModifyBlob bool, isConfig bool, bar *pb.ProgressBar) (types.BlobInfo, error) {
    canModifyBlob bool, isConfig bool, bar *mpb.Bar) (types.BlobInfo, error) {
    // The copying happens through a pipeline of connected io.Readers.
    // === Input: srcStream

@@ -729,7 +791,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
        return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
    }
    isCompressed := decompressor != nil
    destStream = bar.NewProxyReader(destStream)
    destStream = bar.ProxyReader(destStream)

    // === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
    var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
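Since the diff above replaces gopkg.in/cheggaaa/pb.v1 with github.com/vbauerster/mpb, a condensed sketch of the new pool/bar/ProxyReader pattern may help. It is illustrative only and uses just the mpb calls that appear in the hunks; the digest in the prefix is made up:

package main

import (
    "context"
    "io"
    "io/ioutil"
    "os"
    "strings"

    "github.com/vbauerster/mpb"
    "github.com/vbauerster/mpb/decor"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    pool := mpb.New(mpb.WithWidth(40), mpb.WithOutput(os.Stderr), mpb.WithContext(ctx))

    src := strings.NewReader(strings.Repeat("x", 1<<16)) // stand-in for a blob stream
    bar := pool.AddBar(int64(src.Len()),
        mpb.PrependDecorators(decor.Name("Copying blob 0123456789ab")),
        mpb.AppendDecorators(decor.CountersKibiByte("%.1f / %.1f")),
    )

    // ProxyReader advances the bar as the stream is consumed, as in copyBlobFromStream.
    if _, err := io.Copy(ioutil.Discard, bar.ProxyReader(src)); err != nil {
        panic(err)
    }

    // Mirror newProgressPool's cleanup: cancel the context, then wait for rendering to stop.
    cancel()
    pool.Wait()
}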
33 vendor/github.com/containers/image/docker/docker_client.go generated vendored
@@ -91,7 +91,6 @@ type dockerClient struct {
    password      string
    signatureBase signatureStorageBase
    scope         authScope
    extraScope    *authScope // If non-nil, a temporary extra token scope (necessary for mounting from another repo)
    // The following members are detected registry properties:
    // They are set after a successful detectProperties(), and never change afterwards.
    scheme string // Empty value also used to indicate detectProperties() has not yet succeeded.
@@ -282,7 +281,7 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password
    client.username = username
    client.password = password

    resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth)
    resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth, nil)
    if err != nil {
        return err
    }
@@ -362,8 +361,8 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
    q.Set("n", strconv.Itoa(limit))
    u.RawQuery = q.Encode()

    logrus.Debugf("trying to talk to v1 search endpoint\n")
    resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth)
    logrus.Debugf("trying to talk to v1 search endpoint")
    resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth, nil)
    if err != nil {
        logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
    } else {
@@ -379,8 +378,8 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
        }
    }

    logrus.Debugf("trying to talk to v2 search endpoint\n")
    resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth)
    logrus.Debugf("trying to talk to v2 search endpoint")
    resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth, nil)
    if err != nil {
        logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
    } else {
@@ -409,20 +408,20 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima

// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// The host name and scheme are taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth) (*http.Response, error) {
func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth, extraScope *authScope) (*http.Response, error) {
    if err := c.detectProperties(ctx); err != nil {
        return nil, err
    }

    url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
    return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth)
    return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth, extraScope)
}

// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
// streamLen, if not -1, specifies the length of the data expected on stream.
// makeRequest should generally be preferred.
// TODO(runcom): too many arguments here, use a struct
func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth) (*http.Response, error) {
func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) {
    req, err := http.NewRequest(method, url, stream)
    if err != nil {
        return nil, err
@@ -441,7 +440,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
        req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
    }
    if auth == v2Auth {
        if err := c.setupRequestAuth(req); err != nil {
        if err := c.setupRequestAuth(req, extraScope); err != nil {
            return nil, err
        }
    }
@@ -460,7 +459,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request
//
// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up
func (c *dockerClient) setupRequestAuth(req *http.Request) error {
func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope) error {
    if len(c.challenges) == 0 {
        return nil
    }
@@ -474,10 +473,10 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
    case "bearer":
        cacheKey := ""
        scopes := []authScope{c.scope}
        if c.extraScope != nil {
        if extraScope != nil {
            // Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
            cacheKey = fmt.Sprintf("%s:%s", c.extraScope.remoteName, c.extraScope.actions)
            scopes = append(scopes, *c.extraScope)
            cacheKey = fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions)
            scopes = append(scopes, *extraScope)
        }
        var token bearerToken
        t, inCache := c.tokenCache.Load(cacheKey)
@@ -564,7 +563,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {

    ping := func(scheme string) error {
        url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
        resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth)
        resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
        if err != nil {
            logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
            return err
@@ -591,7 +590,7 @@ func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
    // best effort to understand if we're talking to a V1 registry
    pingV1 := func(scheme string) bool {
        url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
        resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth)
        resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
        if err != nil {
            logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
            return false
@@ -625,7 +624,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
// using the original data structures.
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
    path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
    res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth)
    res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
    if err != nil {
        return nil, err
    }
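A design note on the change above: the removed dockerClient.extraScope field was shared mutable state, which becomes unsafe once blob operations run concurrently (HasThreadSafePutBlob now returns true in docker_image_dest.go below); passing the scope as a parameter keeps each request self-contained. A stripped-down illustration with hypothetical names, not the real client:

package main

import "fmt"

// authScope mirrors the shape used in the diff; this sketch is hypothetical.
type authScope struct{ remoteName, actions string }

// tokenCacheKey shows the per-call pattern: the extra scope arrives as an
// argument instead of living on a shared client struct, so concurrent
// requests cannot trample each other's scope.
func tokenCacheKey(extraScope *authScope) string {
    if extraScope == nil {
        return "" // default scope only
    }
    // Same ':' separator as the real client; repository names cannot contain colons.
    return fmt.Sprintf("%s:%s", extraScope.remoteName, extraScope.actions)
}

func main() {
    fmt.Printf("%q\n", tokenCacheKey(nil))
    fmt.Printf("%q\n", tokenCacheKey(&authScope{remoteName: "library/alpine", actions: "pull"}))
}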
2 vendor/github.com/containers/image/docker/docker_image.go generated vendored
@@ -66,7 +66,7 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
    tags := make([]string, 0)

    for {
        res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth)
        res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
        if err != nil {
            return nil, err
        }
62 vendor/github.com/containers/image/docker/docker_image_dest.go generated vendored
@@ -12,6 +12,7 @@ import (
    "net/url"
    "os"
    "path/filepath"
    "strings"

    "github.com/containers/image/docker/reference"
    "github.com/containers/image/manifest"
@@ -113,7 +114,7 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {

// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *dockerImageDestination) HasThreadSafePutBlob() bool {
    return false
    return true
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
@@ -140,7 +141,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
    // FIXME? Chunked upload, progress reporting, etc.
    uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
    logrus.Debugf("Uploading %s", uploadPath)
    res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth)
    res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth, nil)
    if err != nil {
        return types.BlobInfo{}, err
    }
@@ -157,7 +158,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
    digester := digest.Canonical.Digester()
    sizeCounter := &sizeCounter{}
    tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
    res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth)
    res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth, nil)
    if err != nil {
        logrus.Debugf("Error uploading layer chunked, response %#v", res)
        return types.BlobInfo{}, err
@@ -176,7 +177,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
    // TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
    locationQuery.Set("digest", computedDigest.String())
    uploadLocation.RawQuery = locationQuery.Encode()
    res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth)
    res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
    if err != nil {
        return types.BlobInfo{}, err
    }
@@ -194,10 +195,10 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
// blobExists returns true iff repo contains a blob with digest, and if so, also its size.
// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest) (bool, int64, error) {
func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
    checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
    logrus.Debugf("Checking %s", checkPath)
    res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth)
    res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth, extraScope)
    if err != nil {
        return false, -1, err
    }
@@ -218,7 +219,7 @@ func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.
}

// mountBlob tries to mount blob srcDigest from srcRepo to the current destination.
func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest) error {
func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest, extraScope *authScope) error {
    u := url.URL{
        Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
        RawQuery: url.Values{
@@ -228,7 +229,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
    }
    mountPath := u.String()
    logrus.Debugf("Trying to mount %s", mountPath)
    res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth)
    res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth, extraScope)
    if err != nil {
        return err
    }
@@ -246,7 +247,7 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
            return errors.Wrap(err, "Error determining upload URL after a mount attempt")
        }
        logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
        res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth)
        res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth, extraScope)
        if err != nil {
            logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
        } else {
@@ -276,7 +277,7 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
    }

    // First, check whether the blob happens to already exist at the destination.
    exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest)
    exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
    if err != nil {
        return false, types.BlobInfo{}, err
    }
@@ -286,15 +287,6 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
    }

    // Then try reusing blobs from other locations.

    // Checking candidateRepo, and mounting from it, requires an expanded token scope.
    // We still want to reuse the ping information and other aspects of the client, so rather than make a fresh copy, there is this a bit ugly extraScope hack.
    if d.c.extraScope != nil {
        return false, types.BlobInfo{}, errors.New("Internal error: dockerClient.extraScope was set before TryReusingBlob")
    }
    defer func() {
        d.c.extraScope = nil
    }()
    for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
        candidateRepo, err := parseBICLocationReference(candidate.Location)
        if err != nil {
@@ -314,7 +306,10 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
        }

        // Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
        d.c.extraScope = &authScope{

        // Checking candidateRepo, and mounting from it, requires an
        // expanded token scope.
        extraScope := &authScope{
            remoteName: reference.Path(candidateRepo),
            actions:    "pull",
        }
@@ -325,7 +320,7 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
        // Even worse, docker/distribution does not actually reasonably implement canceling uploads
        // (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
        // so, be a nice client and don't create unnecessary upload sessions on the server.
        exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest)
        exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope)
        if err != nil {
            logrus.Debugf("... Failed: %v", err)
            continue
@@ -335,7 +330,7 @@ func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.
            continue // logrus.Debug() already happened in blobExists
        }
        if candidateRepo.Name() != d.ref.ref.Name() {
            if err := d.mountBlob(ctx, candidateRepo, candidate.Digest); err != nil {
            if err := d.mountBlob(ctx, candidateRepo, candidate.Digest, extraScope); err != nil {
                logrus.Debugf("... Mount failed: %v", err)
                continue
            }
@@ -369,7 +364,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte) erro
    if mimeType != "" {
        headers["Content-Type"] = []string{mimeType}
    }
    res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth)
    res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth, nil)
    if err != nil {
        return err
    }
@@ -396,14 +391,29 @@ func isManifestInvalidError(err error) bool {
    if !ok || len(errors) == 0 {
        return false
    }
    ec, ok := errors[0].(errcode.ErrorCoder)
    err = errors[0]
    ec, ok := err.(errcode.ErrorCoder)
    if !ok {
        return false
    }

    switch ec.ErrorCode() {
    // ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
    case v2.ErrorCodeManifestInvalid:
        return true
    // ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
    // when uploading to a tag (because it can’t find a matching tag inside the manifest)
    return ec.ErrorCode() == v2.ErrorCodeManifestInvalid || ec.ErrorCode() == v2.ErrorCodeTagInvalid
    case v2.ErrorCodeTagInvalid:
        return true
    // ErrorCodeUnsupported with 'Invalid JSON syntax' is returned by AWS ECR when
    // uploading an OCI manifest that is (correctly, according to the spec) missing
    // a top-level media type. See libpod issue #1719
    // FIXME: remove this case when ECR behavior is fixed
    case errcode.ErrorCodeUnsupported:
        return strings.Contains(err.Error(), "Invalid JSON syntax")
    default:
        return false
    }
}

func (d *dockerImageDestination) PutSignatures(ctx context.Context, signatures [][]byte) error {
@@ -574,7 +584,7 @@ sigExists:
    }

    path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
    res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth)
    res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth, nil)
    if err != nil {
        return err
    }
16 vendor/github.com/containers/image/docker/docker_image_src.go generated vendored
@@ -89,7 +89,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
    path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest)
    headers := make(map[string][]string)
    headers["Accept"] = manifest.DefaultRequestedManifestMIMETypes
    res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth)
    res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth, nil)
    if err != nil {
        return nil, "", err
    }
@@ -137,7 +137,7 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
        err  error
    )
    for _, url := range urls {
        resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth)
        resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth, nil)
        if err == nil {
            if resp.StatusCode != http.StatusOK {
                err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode))
@@ -147,10 +147,10 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
            break
        }
    }
    if resp.Body != nil && err == nil {
        return resp.Body, getBlobSize(resp), nil
    if err != nil {
        return nil, 0, err
    }
    return nil, 0, err
    return resp.Body, getBlobSize(resp), nil
}

func getBlobSize(resp *http.Response) int64 {
@@ -176,7 +176,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca

    path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String())
    logrus.Debugf("Downloading %s", path)
    res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth)
    res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth, nil)
    if err != nil {
        return nil, 0, err
    }
@@ -340,7 +340,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
        return err
    }
    getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
    get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth)
    get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth, nil)
    if err != nil {
        return err
    }
@@ -362,7 +362,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere

    // When retrieving the digest from a registry >= 2.3 use the following header:
    //   "Accept": "application/vnd.docker.distribution.manifest.v2+json"
    delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth)
    delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth, nil)
    if err != nil {
        return err
    }
76
vendor/github.com/containers/image/docker/tarfile/src.go
generated
vendored
76
vendor/github.com/containers/image/docker/tarfile/src.go
generated
vendored
@@ -9,6 +9,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path"
+	"sync"
 
 	"github.com/containers/image/internal/tmpdir"
 	"github.com/containers/image/manifest"
@@ -21,8 +22,10 @@ import (
 // Source is a partial implementation of types.ImageSource for reading from tarPath.
 type Source struct {
 	tarPath              string
 	removeTarPathOnClose bool // Remove temp file on close if true
+	cacheDataLock        sync.Once // Atomic way to ensure that ensureCachedDataIsPresent is only invoked once
 	// The following data is only available after ensureCachedDataIsPresent() succeeds
+	cacheDataResult   error // The return value of ensureCachedDataIsPresent, since it should be as safe to cache as the side effects
 	tarManifest       *ManifestItem // nil if not available yet.
 	configBytes       []byte
 	configDigest      digest.Digest
@@ -199,43 +202,46 @@ func (s *Source) readTarComponent(path string) ([]byte, error) {
 
 // ensureCachedDataIsPresent loads data necessary for any of the public accessors.
 func (s *Source) ensureCachedDataIsPresent() error {
-	if s.tarManifest != nil {
-		return nil
-	}
-
-	// Read and parse manifest.json
-	tarManifest, err := s.loadTarManifest()
-	if err != nil {
-		return err
-	}
-	// Check to make sure length is 1
-	if len(tarManifest) != 1 {
-		return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
-	}
-	// Read and parse config.
-	configBytes, err := s.readTarComponent(tarManifest[0].Config)
-	if err != nil {
-		return err
-	}
-	var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
-	if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
-		return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
-	}
-	knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
-	if err != nil {
-		return err
-	}
-
-	// Success; commit.
-	s.tarManifest = &tarManifest[0]
-	s.configBytes = configBytes
-	s.configDigest = digest.FromBytes(configBytes)
-	s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
-	s.knownLayers = knownLayers
-	return nil
+	s.cacheDataLock.Do(func() {
+		// Read and parse manifest.json
+		tarManifest, err := s.loadTarManifest()
+		if err != nil {
+			s.cacheDataResult = err
+			return
+		}
+
+		// Check to make sure length is 1
+		if len(tarManifest) != 1 {
+			s.cacheDataResult = errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
+			return
+		}
+
+		// Read and parse config.
+		configBytes, err := s.readTarComponent(tarManifest[0].Config)
+		if err != nil {
+			s.cacheDataResult = err
+			return
+		}
+		var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
+		if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
+			s.cacheDataResult = errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
+			return
+		}
+
+		knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
+		if err != nil {
+			s.cacheDataResult = err
+			return
+		}
+
+		// Success; commit.
+		s.tarManifest = &tarManifest[0]
+		s.configBytes = configBytes
+		s.configDigest = digest.FromBytes(configBytes)
+		s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
+		s.knownLayers = knownLayers
+	})
+	return s.cacheDataResult
 }
 
 // loadTarManifest loads and decodes the manifest.json.
@@ -399,7 +405,7 @@ func (r uncompressedReadCloser) Close() error {
 
 // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
 func (s *Source) HasThreadSafeGetBlob() bool {
-	return false
+	return true
}
 
 // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
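The tarfile change above caches both the side effects and the error of a one-time load behind a `sync.Once`, which is what allows `HasThreadSafeGetBlob` to flip to `true`. A stripped-down sketch of that pattern, with illustrative names (`source`, `readConfig` are not part of the actual package):

```go
// Sketch: caching a one-time initialization *and its error* with sync.Once,
// so concurrent callers all observe the same result.
package main

import (
	"fmt"
	"sync"
)

type source struct {
	once   sync.Once
	err    error // cached result of load
	config []byte
}

func (s *source) load() error {
	s.once.Do(func() {
		// Expensive, fallible work runs exactly once, even when load is
		// called from many goroutines; on failure only s.err is set.
		data, err := readConfig() // illustrative helper
		if err != nil {
			s.err = err
			return
		}
		s.config = data // success; commit
	})
	return s.err
}

func readConfig() ([]byte, error) { return []byte(`{"layers":[]}`), nil }

func main() {
	s := &source{}
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if err := s.load(); err != nil {
				fmt.Println("load failed:", err)
			}
		}()
	}
	wg.Wait()
	fmt.Printf("config: %s\n", s.config)
}
```

Note that `sync.Once` only guarantees single execution; caching the error alongside the data is what makes a failed load observable to every later caller.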
66
vendor/github.com/containers/image/docs/atomic-signature-embedded-json.json
generated
vendored
Normal file
@@ -0,0 +1,66 @@
{
    "title": "JSON embedded in an atomic container signature",
    "description": "This schema is a supplement to atomic-signature.md in this directory.\n\nConsumers of the JSON MUST use the processing rules documented in atomic-signature.md, especially the requirements for the 'critical' subjobject.\n\nWhenever this schema and atomic-signature.md, or the github.com/containers/image/signature implementation, differ,\nit is the atomic-signature.md document, or the github.com/containers/image/signature implementation, which governs.\n\nUsers are STRONGLY RECOMMENDED to use the github.com/containers/image/signature implementation instead of writing\ntheir own, ESPECIALLY when consuming signatures, so that the policy.json format can be shared by all image consumers.\n",
    "type": "object",
    "required": [
        "critical",
        "optional"
    ],
    "additionalProperties": false,
    "properties": {
        "critical": {
            "type": "object",
            "required": [
                "type",
                "image",
                "identity"
            ],
            "additionalProperties": false,
            "properties": {
                "type": {
                    "type": "string",
                    "enum": [
                        "atomic container signature"
                    ]
                },
                "image": {
                    "type": "object",
                    "required": [
                        "docker-manifest-digest"
                    ],
                    "additionalProperties": false,
                    "properties": {
                        "docker-manifest-digest": {
                            "type": "string"
                        }
                    }
                },
                "identity": {
                    "type": "object",
                    "required": [
                        "docker-reference"
                    ],
                    "additionalProperties": false,
                    "properties": {
                        "docker-reference": {
                            "type": "string"
                        }
                    }
                }
            }
        },
        "optional": {
            "type": "object",
            "description": "All members are optional, but if they are included, they must be valid.",
            "additionalProperties": true,
            "properties": {
                "creator": {
                    "type": "string"
                },
                "timestamp": {
                    "type": "integer"
                }
            }
        }
    }
}
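A sketch of checking a payload against this schema, assuming the third-party `github.com/xeipuuv/gojsonschema` validator (not something containers/image itself uses); schema validation alone is deliberately weaker than the processing rules in atomic-signature.md, which this schema only supplements:

```go
// Sketch: validating a signature payload against the embedded-JSON schema.
// Schema validation does NOT replace the stricter rules in
// atomic-signature.md (e.g. duplicate-member rejection).
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	schema := gojsonschema.NewReferenceLoader(
		"file:///path/to/atomic-signature-embedded-json.json") // illustrative path
	payload := gojsonschema.NewStringLoader(`{
		"critical": {
			"type": "atomic container signature",
			"image": {"docker-manifest-digest": "sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e"},
			"identity": {"docker-reference": "docker.io/library/busybox:latest"}
		},
		"optional": {}
	}`)
	result, err := gojsonschema.Validate(schema, payload)
	if err != nil {
		panic(err)
	}
	if !result.Valid() {
		for _, e := range result.Errors() {
			fmt.Println("schema violation:", e)
		}
		return
	}
	fmt.Println("payload matches the schema")
}
```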
241
vendor/github.com/containers/image/docs/atomic-signature.md
generated
vendored
Normal file
@@ -0,0 +1,241 @@
% atomic-signature(5) Atomic signature format
% Miloslav Trmač
% March 2017

# Atomic signature format

This document describes the format of “atomic” container signatures,
as implemented by the `github.com/containers/image/signature` package.

Most users should be able to consume these signatures by using the `github.com/containers/image/signature` package
(preferably through the higher-level `signature.PolicyContext` interface)
without having to care about the details of the format described below.
This documentation exists primarily for maintainers of the package
and to allow independent reimplementations.

## High-level overview

The signature provides an end-to-end authenticated claim that a container image
has been approved by a specific party (e.g. the creator of the image as their work,
an automated build system as a result of an automated build,
a company IT department approving the image for production) under a specified _identity_
(e.g. an OS base image / specific application, with a specific version).

An atomic container signature consists of a cryptographic signature which identifies
and authenticates who signed the image, and carries as a signed payload a JSON document.
The JSON document identifies the image being signed, claims a specific identity of the
image and, if applicable, contains other information about the image.

The signatures do not modify the container image (the layers, configuration, manifest, …);
e.g. their presence does not change the manifest digest used to identify the image in
docker/distribution servers; rather, the signatures are associated with an immutable image.
An image can have any number of signatures, so signature distribution systems SHOULD support
associating more than one signature with an image.

## The cryptographic signature

As distributed, the atomic container signature is a blob which contains a cryptographic signature
in an industry-standard format, carrying a signed JSON payload (i.e. the blob contains both the
JSON document and a signature of the JSON document; it is not a “detached signature” with
independent blobs containing the JSON document and a cryptographic signature).

Currently the only defined cryptographic signature format is an OpenPGP signature (RFC 4880),
but others may be added in the future. (The blob does not contain metadata identifying the
cryptographic signature format. It is expected that most formats are sufficiently self-describing
that this is not necessary, and the configured expected public key provides another indication
of the expected cryptographic signature format. Such metadata may be added in the future for
newly added cryptographic signature formats, if necessary.)

Consumers of atomic container signatures SHOULD verify the cryptographic signature
against one or more trusted public keys
(e.g. defined in a [policy.json signature verification policy file](policy.json.md))
before parsing or processing the JSON payload in _any_ way,
in particular they SHOULD stop processing the container signature
if the cryptographic signature verification fails, without even starting to process the JSON payload.

(Consumers MAY extract identification of the signing key and other metadata from the cryptographic signature,
and the JSON payload, without verifying the signature, if the purpose is to allow managing the signature blobs,
e.g. to list the authors and image identities of signatures associated with a single container image;
if so, they SHOULD design the output of such processing to minimize the risk of users considering the output trusted
or in any way usable for making policy decisions about the image.)

### OpenPGP signature verification

When verifying a cryptographic signature in the OpenPGP format,
the consumer MUST verify at least the following aspects of the signature
(like the `github.com/containers/image/signature` package does):

- The blob MUST be a “Signed Message” as defined in RFC 4880 section 11.3
  (e.g. it MUST NOT be an unsigned “Literal Message”, or any other non-signature format).
- The signature MUST have been made by an expected key trusted for the purpose (and the specific container image).
- The signature MUST be correctly formed and pass the cryptographic validation.
- The signature MUST correctly authenticate the included JSON payload
  (in particular, the parsing of the JSON payload MUST NOT start before the complete payload has been cryptographically authenticated).
- The signature MUST NOT be expired.

The consumer SHOULD have tests for its verification code which verify that signatures failing any of the above are rejected.
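To make the checklist above concrete, here is a minimal sketch of verification with the pure-Go `golang.org/x/crypto/openpgp` package (the signature package also supports GPGME); the keyring path and helper name are illustrative, and the sketch covers only part of the checklist (e.g. it does not check key expiry):

```go
// Sketch: verifying an OpenPGP "Signed Message" blob and recovering its
// JSON payload only after full cryptographic authentication.
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"

	"golang.org/x/crypto/openpgp"
)

func verifySignedMessage(keyring openpgp.EntityList, blob []byte) ([]byte, error) {
	md, err := openpgp.ReadMessage(bytes.NewReader(blob), keyring, nil, nil)
	if err != nil {
		return nil, err
	}
	if !md.IsSigned {
		return nil, fmt.Errorf("not a signed message")
	}
	// Read the whole body first: SignatureError is only meaningful after
	// EOF, and the payload must not be parsed before it is authenticated.
	payload, err := ioutil.ReadAll(md.UnverifiedBody)
	if err != nil {
		return nil, err
	}
	if md.SignatureError != nil {
		return nil, md.SignatureError
	}
	if md.SignedBy == nil {
		return nil, fmt.Errorf("signed by an unknown key")
	}
	return payload, nil
}

func main() {
	f, err := os.Open("pubring.gpg") // illustrative keyring path
	if err != nil {
		panic(err)
	}
	defer f.Close()
	keyring, err := openpgp.ReadKeyRing(f)
	if err != nil {
		panic(err)
	}
	blob, _ := ioutil.ReadFile("signature-1") // a downloaded signature blob
	payload, err := verifySignedMessage(keyring, blob)
	if err != nil {
		panic(err)
	}
	fmt.Printf("verified payload: %s\n", payload)
}
```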
## JSON processing and forward compatibility

The payload of the cryptographic signature is a JSON document (RFC 7159).
Consumers SHOULD parse it very strictly,
refusing any signature which violates the expected format (e.g. missing members, incorrect member types)
or can be interpreted ambiguously (e.g. a duplicated member in a JSON object).

Any violations of the JSON format or of other requirements in this document MAY be accepted if the JSON document can be recognized
to have been created by a known-incorrect implementation (see [`optional.creator`](#optionalcreator) below)
and if the semantics of the invalid document, as created by such an implementation, is clear.

The top-level value of the JSON document MUST be a JSON object with exactly two members, `critical` and `optional`,
each a JSON object.

The `critical` object MUST contain a `type` member identifying the document as an atomic container signature
(as defined [below](#criticaltype))
and signature consumers MUST reject signatures which do not have this member or in which this member does not have the expected value.

To ensure forward compatibility (allowing older signature consumers to correctly
accept or reject signatures created at a later date, with possible extensions to this format),
consumers MUST reject the signature if the `critical` object, or _any_ of its subobjects,
contains _any_ member or data value which is unrecognized, unsupported, invalid, or in any other way unexpected.
At a minimum, this includes unrecognized members in a JSON object, or incorrect types of expected members.

For the same reason, consumers SHOULD accept any members with unrecognized names in the `optional` object,
and MAY accept signatures where the object member is recognized but unsupported, or the value of the member is unsupported.
Consumers still SHOULD reject signatures where a member of an `optional` object is supported but the value is recognized as invalid.
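As a sketch of the strictness this section asks for, the following uses `encoding/json`'s `Decoder` with `DisallowUnknownFields` to reject unrecognized members of `critical` while tolerating unknown members of `optional`. Note that this is only a partial illustration: detecting *duplicated* members requires token-level parsing, which `encoding/json` does not do for you (containers/image implements its own stricter parser). The type names here are illustrative:

```go
// Sketch: strictly decoding the signature payload into known-member structs.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

type criticalPayload struct {
	Type  string `json:"type"`
	Image struct {
		DockerManifestDigest string `json:"docker-manifest-digest"`
	} `json:"image"`
	Identity struct {
		DockerReference string `json:"docker-reference"`
	} `json:"identity"`
}

type signaturePayload struct {
	Critical json.RawMessage        `json:"critical"`
	Optional map[string]interface{} `json:"optional"` // unknown members tolerated
}

func parsePayload(data []byte) (*criticalPayload, error) {
	var top signaturePayload
	dec := json.NewDecoder(bytes.NewReader(data))
	dec.DisallowUnknownFields() // reject unexpected top-level members
	if err := dec.Decode(&top); err != nil {
		return nil, err
	}
	var crit criticalPayload
	cdec := json.NewDecoder(bytes.NewReader(top.Critical))
	cdec.DisallowUnknownFields() // reject unexpected members anywhere in critical
	if err := cdec.Decode(&crit); err != nil {
		return nil, err
	}
	if crit.Type != "atomic container signature" {
		return nil, fmt.Errorf("unexpected critical.type %q", crit.Type)
	}
	return &crit, nil
}

func main() {
	crit, err := parsePayload([]byte(`{"critical":{"type":"atomic container signature","image":{"docker-manifest-digest":"sha256:abc"},"identity":{"docker-reference":"docker.io/library/busybox:latest"}},"optional":{"creator":"x"}}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(crit.Identity.DockerReference)
}
```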
## JSON data format

An example of the full format follows, with a detailed description below.
To reiterate, consumers of the signature SHOULD perform successful cryptographic verification,
and MUST reject unexpected data in the `critical` object, or in the top-level object, as described above.

```json
{
    "critical": {
        "type": "atomic container signature",
        "image": {
            "docker-manifest-digest": "sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e"
        },
        "identity": {
            "docker-reference": "docker.io/library/busybox:latest"
        }
    },
    "optional": {
        "creator": "some software package v1.0.1-35",
        "timestamp": 1483228800
    }
}
```

### `critical`

This MUST be a JSON object which contains data critical to correctly evaluating the validity of a signature.

Consumers MUST reject any signature where the `critical` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data.

### `critical.type`

This MUST be a string with a value exactly equal to `atomic container signature` (three words, including the spaces).

Signature consumers MUST reject signatures which do not have this member or where this member does not have exactly the expected value.

(The consumers MAY support signatures with a different value of the `type` member, if any is defined in the future;
if so, the rest of the JSON document is interpreted according to rules defining that value of `critical.type`,
not by this document.)

### `critical.image`

This MUST be a JSON object which identifies the container image this signature applies to.

Consumers MUST reject any signature where the `critical.image` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data.

(Currently only the `docker-manifest-digest` way of identifying a container image is defined;
alternatives to this may be defined in the future,
but existing consumers are required to reject signatures which use formats they do not support.)

### `critical.image.docker-manifest-digest`

This MUST be a JSON string, in the `github.com/opencontainers/go-digest.Digest` string format.

The value of this member MUST match the manifest of the signed container image, as implemented in the docker/distribution manifest addressing system.

The consumer of the signature SHOULD verify the manifest digest against a fully verified signature before processing the contents of the image manifest in any other way
(e.g. parsing the manifest further or downloading layers of the image).

Implementation notes:
* A single container image manifest may have several valid manifest digest values, using different algorithms.
* For “signed” [docker/distribution schema 1](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md) manifests,
the manifest digest applies to the payload of the JSON web signature, not to the raw manifest blob.

### `critical.identity`

This MUST be a JSON object which identifies the claimed identity of the image (usually the purpose of the image, or the application, along with version information),
as asserted by the author of the signature.

Consumers MUST reject any signature where the `critical.identity` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data.

(Currently only the `docker-reference` way of claiming an image identity/purpose is defined;
alternatives to this may be defined in the future,
but existing consumers are required to reject signatures which use formats they do not support.)

### `critical.identity.docker-reference`

This MUST be a JSON string, in the `github.com/docker/distribution/reference` string format,
and using the same normalization semantics (where e.g. `busybox:latest` is equivalent to `docker.io/library/busybox:latest`).
If the normalization semantics allows multiple string representations of the claimed identity with equivalent meaning,
the `critical.identity.docker-reference` member SHOULD use the fully explicit form (including the full host name and namespaces).

The value of this member MUST match the image identity/purpose expected by the consumer of the image signature and the image
(again, accounting for the `docker/distribution/reference` normalization semantics).

In the most common case, this means that the `critical.identity.docker-reference` value must be equal to the docker/distribution reference used to refer to or download the image.
However, depending on the specific application, users or system administrators may accept less specific matches
(e.g. ignoring the tag value in the signature when pulling the `:latest` tag or when referencing an image by digest),
or they may require `critical.identity.docker-reference` values with a completely different namespace to the reference used to refer to/download the image
(e.g. requiring a `critical.identity.docker-reference` value which identifies the image as coming from a supplier when fetching it from a company-internal mirror of approved images).
The software performing this verification SHOULD allow the users to define such a policy using the [policy.json signature verification policy file format](policy.json.md).

The `critical.identity.docker-reference` value SHOULD contain either a tag or digest;
in most cases, it SHOULD use a tag rather than a digest. (See also the default [`matchRepoDigestOrExact` matching semantics in `policy.json`](policy.json.md#signedby).)

### `optional`

This MUST be a JSON object.

Consumers SHOULD accept any members with unrecognized names in the `optional` object,
and MAY accept a signature where the object member is recognized but unsupported, or the value of the member is valid but unsupported.
Consumers still SHOULD reject any signature where a member of an `optional` object is supported but the value is recognized as invalid.

### `optional.creator`

If present, this MUST be a JSON string, identifying the name and version of the software which has created the signature.

The contents of this string are not defined in detail; however each implementation creating atomic container signatures:

- SHOULD define the contents to unambiguously define the software in practice (e.g. it SHOULD contain the name of the software, not only the version number)
- SHOULD use a build and versioning process which ensures that the contents of this string (e.g. an included version number)
changes whenever the format or semantics of the generated signature changes in any way;
it SHOULD NOT be possible for two implementations which use a different format or semantics to have the same `optional.creator` value
- SHOULD use a format which is reasonably easy to parse in software (perhaps using a regexp),
and which makes it easy enough to recognize a range of versions of a specific implementation
(e.g. the version of the implementation SHOULD NOT be only a git hash, because they don’t have an easily defined ordering;
the string should contain a version number, or at least a date of the commit).

Consumers of atomic container signatures MAY recognize specific values or sets of values of `optional.creator`
(perhaps augmented with `optional.timestamp`),
and MAY change their processing of the signature based on these values
(usually to accommodate violations of this specification in past versions of the signing software which cannot be fixed retroactively),
as long as the semantics of the invalid document, as created by such an implementation, is clear.

If consumers of signatures do change their behavior based on the `optional.creator` value,
they SHOULD take care that the way they process the signatures is not inconsistent with
strictly validating signature consumers.
(I.e. it is acceptable for a consumer to accept a signature based on a specific `optional.creator` value
if other implementations would completely reject the signature,
but it would be very undesirable for the two kinds of implementations to accept the signature in different
and inconsistent situations.)

### `optional.timestamp`

If present, this MUST be a JSON number, which is representable as a 64-bit integer, and identifies the time when the signature was created
as the number of seconds since the UNIX epoch (Jan 1 1970 00:00 UTC).
283
vendor/github.com/containers/image/docs/containers-policy.json.md
generated
vendored
Normal file
@@ -0,0 +1,283 @@
% CONTAINERS-POLICY.JSON(5) policy.json Man Page
% Miloslav Trmač
% September 2016

# NAME
containers-policy.json - syntax for the signature verification policy file

## DESCRIPTION

Signature verification policy files are used to specify policy, e.g. trusted keys,
applicable when deciding whether to accept an image, or individual signatures of that image, as valid.

The default policy is stored (unless overridden at compile-time) at `/etc/containers/policy.json`;
applications performing verification may allow using a different policy instead.

## FORMAT

The signature verification policy file, usually called `policy.json`,
uses a JSON format. Unlike some other JSON files, its parsing is fairly strict:
unrecognized, duplicated or otherwise invalid fields cause the entire file,
and usually the entire operation, to be rejected.

The purpose of the policy file is to define a set of *policy requirements* for a container image,
usually depending on its location (where it is being pulled from) or otherwise defined identity.

Policy requirements can be defined for:

- An individual *scope* in a *transport*.
  The *transport* values are the same as the transport prefixes when pushing/pulling images (e.g. `docker:`, `atomic:`),
  and *scope* values are defined by each transport; see below for more details.

  Usually, a scope can be defined to match a single image, and various prefixes of
  such a most specific scope define namespaces of matching images.
- A default policy for a single transport, expressed using an empty string as a scope.
- A global default policy.

If multiple policy requirements match a given image, only the requirements from the most specific match apply;
the more general policy requirements definitions are ignored.

This is expressed in JSON using the top-level syntax
```js
{
    "default": [/* policy requirements: global default */],
    "transports": {
        transport_name: {
            "": [/* policy requirements: default for transport $transport_name */],
            scope_1: [/* policy requirements: default for $scope_1 in $transport_name */],
            scope_2: [/*…*/]
            /*…*/
        },
        transport_name_2: {/*…*/}
        /*…*/
    }
}
```

The global `default` set of policy requirements is mandatory; all of the other fields
(`transports` itself, any specific transport, the transport-specific default, etc.) are optional.

<!-- NOTE: Keep this in sync with transports/transports.go! -->
## Supported transports and their scopes

### `atomic:`

The `atomic:` transport refers to images in an Atomic Registry.

Supported scopes use the form _hostname_[`:`_port_][`/`_namespace_[`/`_imagestream_ [`:`_tag_]]],
i.e. either specifying a complete name of a tagged image, or a prefix denoting
a host/namespace/image stream.

*Note:* The _hostname_ and _port_ refer to the Docker registry host and port (the one used
e.g. for `docker pull`), _not_ to the OpenShift API host and port.

### `dir:`

The `dir:` transport refers to images stored in local directories.

Supported scopes are paths of directories (either containing a single image or
subdirectories possibly containing images).

*Note:* The paths must be absolute and contain no symlinks. Paths violating these requirements may be silently ignored.

The top-level scope `"/"` is forbidden; use the transport default scope `""`,
for consistency with other transports.

### `docker:`

The `docker:` transport refers to images in a registry implementing the "Docker Registry HTTP API V2".

Scopes matching individual images are named Docker references *in the fully expanded form*, either
using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`).

More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest),
a repository namespace, or a registry host (by only specifying the host name).

### `oci:`

The `oci:` transport refers to images in directories compliant with the "Open Container Image Layout Specification".

Supported scopes use the form _directory_`:`_tag_, with _directory_ referring to
a directory containing one or more tags, or any of the parent directories.

*Note:* See `dir:` above for semantics and restrictions on the directory paths; they apply to `oci:` equivalently.

### `tarball:`

The `tarball:` transport refers to tarred up container root filesystems.

Scopes are ignored.

## Policy Requirements

Using the mechanisms above, a set of policy requirements is looked up. The policy requirements
are represented as a JSON array of individual requirement objects. For an image to be accepted,
*all* of the requirements must be satisfied simultaneously.

The policy requirements can also be used to decide whether an individual signature is accepted (= is signed by a recognized key of a known author);
in that case some requirements may apply only to some signatures, but each signature must be accepted by *at least one* requirement object.

The following requirement objects are supported:

### `insecureAcceptAnything`

A simple requirement with the following syntax

```json
{"type":"insecureAcceptAnything"}
```

This requirement accepts any image (but note that other requirements in the array still apply).

When deciding to accept an individual signature, this requirement does not have any effect; it does *not* cause the signature to be accepted, though.

This is useful primarily for policy scopes where no signature verification is required;
because the array of policy requirements must not be empty, this requirement is used
to represent the lack of requirements explicitly.

### `reject`

A simple requirement with the following syntax:

```json
{"type":"reject"}
```

This requirement rejects every image, and every signature.

### `signedBy`

This requirement requires an image to be signed with an expected identity, or accepts a signature if it is using an expected identity and key.

```js
{
    "type":    "signedBy",
    "keyType": "GPGKeys", /* The only currently supported value */
    "keyPath": "/path/to/local/keyring/file",
    "keyData": "base64-encoded-keyring-data",
    "signedIdentity": identity_requirement
}
```
<!-- Later: other keyType values -->

Exactly one of `keyPath` and `keyData` must be present, containing a GPG keyring of one or more public keys. Only signatures made by these keys are accepted.

The `signedIdentity` field, a JSON object, specifies what image identity the signature claims about the image.
One of the following alternatives is supported:

- The identity in the signature must exactly match the image identity. Note that with this, referencing an image by digest (with a signature claiming a _repository_`:`_tag_ identity) will fail.

  ```json
  {"type":"matchExact"}
  ```
- If the image identity carries a tag, the identity in the signature must exactly match;
  if the image identity uses a digest reference, the identity in the signature must be in the same repository as the image identity (using any tag).

  (Note that with images identified using digest references, the digest from the reference is validated even before signature verification starts.)

  ```json
  {"type":"matchRepoDigestOrExact"}
  ```
- The identity in the signature must be in the same repository as the image identity. This is useful e.g. to pull an image using the `:latest` tag when the image is signed with a tag specifying an exact image version.

  ```json
  {"type":"matchRepository"}
  ```
- The identity in the signature must exactly match a specified identity.
  This is useful e.g. when locally mirroring images signed using their public identity.

  ```js
  {
      "type": "exactReference",
      "dockerReference": docker_reference_value
  }
  ```
- The identity in the signature must be in the same repository as a specified identity.
  This combines the properties of `matchRepository` and `exactReference`.

  ```js
  {
      "type": "exactRepository",
      "dockerRepository": docker_repository_value
  }
  ```

If the `signedIdentity` field is missing, it is treated as `matchRepoDigestOrExact`.

*Note*: `matchExact`, `matchRepoDigestOrExact` and `matchRepository` can only be used if a Docker-like image identity is
provided by the transport. In particular, the `dir:` and `oci:` transports can only be
used with `exactReference` or `exactRepository`.

<!-- ### `signedBaseLayer` -->

## Examples

It is *strongly* recommended to set the `default` policy to `reject`, and then
selectively allow individual transports and scopes as desired.

### A reasonably locked-down system

(Note that the `/*`…`*/` comments are not valid in JSON, and must not be used in real policies.)

```js
{
    "default": [{"type": "reject"}], /* Reject anything not explicitly allowed */
    "transports": {
        "docker": {
            /* Allow installing images from a specific repository namespace, without cryptographic verification.
               This namespace includes images like openshift/hello-openshift and openshift/origin. */
            "docker.io/openshift": [{"type": "insecureAcceptAnything"}],
            /* Similarly, allow installing the “official” busybox images. Note how the fully expanded
               form, with the explicit /library/, must be used. */
            "docker.io/library/busybox": [{"type": "insecureAcceptAnything"}]
            /* Other docker: images use the global default policy and are rejected */
        },
        "dir": {
            "": [{"type": "insecureAcceptAnything"}] /* Allow any images originating in local directories */
        },
        "atomic": {
            /* The common case: using a known key for a repository or set of repositories */
            "hostname:5000/myns/official": [
                {
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "/path/to/official-pubkey.gpg"
                }
            ],
            /* A more complex example, for a repository which contains a mirror of a third-party product,
               which must be signed-off by local IT */
            "hostname:5000/vendor/product": [
                { /* Require the image to be signed by the original vendor, using the vendor's repository location. */
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "/path/to/vendor-pubkey.gpg",
                    "signedIdentity": {
                        "type": "exactRepository",
                        "dockerRepository": "vendor-hostname/product/repository"
                    }
                },
                { /* Require the image to _also_ be signed by a local reviewer. */
                    "type": "signedBy",
                    "keyType": "GPGKeys",
                    "keyPath": "/path/to/reviewer-pubkey.gpg"
                }
            ]
        }
    }
}
```

### Completely disable security, allow all images, do not trust any signatures

```json
{
    "default": [{"type": "insecureAcceptAnything"}]
}
```

## SEE ALSO
  atomic(1)

## HISTORY
August 2018, Rename to containers-policy.json(5) by Valentin Rothberg <vrothberg@suse.com>

September 2016, Originally compiled by Miloslav Trmač <mitr@redhat.com>
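The library's entry points for enforcing a `policy.json` live in `github.com/containers/image/signature`; a minimal sketch of loading a policy and asking whether an image may run follows. Treat it as an outline rather than exact API: method signatures (for example whether `IsRunningImageAllowed` takes a `context.Context`) vary across containers/image versions.

```go
// Sketch: loading policy.json and evaluating it against one image.
package main

import (
	"context"
	"fmt"

	"github.com/containers/image/image"
	"github.com/containers/image/signature"
	"github.com/containers/image/transports/alltransports"
)

func main() {
	policy, err := signature.NewPolicyFromFile("/etc/containers/policy.json")
	if err != nil {
		panic(err)
	}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		panic(err)
	}
	defer pc.Destroy()

	ref, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
	if err != nil {
		panic(err)
	}
	src, err := ref.NewImageSource(context.Background(), nil) // nil *types.SystemContext: defaults
	if err != nil {
		panic(err)
	}
	defer src.Close()

	// UnparsedInstance wraps the source for policy evaluation.
	allowed, err := pc.IsRunningImageAllowed(image.UnparsedInstance(src, nil))
	if err != nil {
		fmt.Println("rejected:", err)
		return
	}
	fmt.Println("allowed:", allowed)
}
```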
56
vendor/github.com/containers/image/docs/containers-registries.conf.5.md
generated
vendored
Normal file
@@ -0,0 +1,56 @@
% CONTAINERS-REGISTRIES.CONF(5) System-wide registry configuration file
% Brent Baude
% Aug 2017

# NAME
containers-registries.conf - Syntax of System Registry Configuration File

# DESCRIPTION
The CONTAINERS-REGISTRIES configuration file is a system-wide configuration
file for container image registries. The file format is TOML. The valid
categories are: 'registries.search', 'registries.insecure', and
'registries.block'.

By default, the configuration file is located at `/etc/containers/registries.conf`.

# FORMAT
The TOML format is used to build a simple list format for registries under three
categories: `registries.search`, `registries.insecure`, and `registries.block`.
You can list multiple registries using a comma-separated list.

Search registries are used when the caller of a container runtime does not fully specify the
container image that they want to execute. These registries are prepended onto the front
of the specified container image until the named image is found at a registry.

Insecure Registries. By default, container runtimes use TLS when retrieving images
from a registry. If the registry is not set up with TLS, the container runtime
will fail to pull images from it. If you add the registry to the list of
insecure registries, the container runtime will attempt to use standard web protocols to
pull the image. It also allows you to pull from a registry with self-signed certificates.
Note that insecure registries can be used for any registry, not just the registries listed
under search.

Block Registries. The registries in this category are not pulled from when
retrieving images.

# EXAMPLE
The following example configuration defines two searchable registries, one
insecure registry, and two blocked registries.

```
[registries.search]
registries = ['registry1.com', 'registry2.com']

[registries.insecure]
registries = ['registry3.com']

[registries.block]
registries = ['registry.untrusted.com', 'registry.unsafe.com']
```

# HISTORY
Aug 2018, Renamed to containers-registries.conf(5) by Valentin Rothberg <vrothberg@suse.com>

Jun 2018, Updated by Tom Sweeney <tsweeney@redhat.com>

Aug 2017, Originally compiled by Brent Baude <bbaude@redhat.com>
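A minimal sketch of how a runtime might expand an unqualified image name against the search list described above; the reference parsing here is deliberately simplified and illustrative (real runtimes use the containers/image sysregistries package and full reference parsing):

```go
// Sketch: expanding an unqualified image name using registries.search.
package main

import (
	"fmt"
	"strings"
)

// candidates returns the fully qualified names to try, in order, for name.
func candidates(name string, searchRegistries []string) []string {
	// A name whose first component contains '.' or ':' (or is "localhost")
	// already names a registry; use it as-is.
	first := strings.SplitN(name, "/", 2)[0]
	if strings.ContainsAny(first, ".:") || first == "localhost" {
		return []string{name}
	}
	out := make([]string, 0, len(searchRegistries))
	for _, reg := range searchRegistries {
		out = append(out, reg+"/"+name)
	}
	return out
}

func main() {
	search := []string{"registry1.com", "registry2.com"}
	fmt.Println(candidates("myimage:latest", search))
	// [registry1.com/myimage:latest registry2.com/myimage:latest]
	fmt.Println(candidates("quay.io/ns/img", search))
	// [quay.io/ns/img]
}
```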
128
vendor/github.com/containers/image/docs/containers-registries.d.md
generated
vendored
Normal file
@@ -0,0 +1,128 @@
|
||||
% CONTAINERS-REGISTRIES.D(5) Registries.d Man Page
|
||||
% Miloslav Trmač
|
||||
% August 2016
|
||||
|
||||
# NAME
|
||||
containers-registries.d - Directory for various registries configurations
|
||||
|
||||
# DESCRIPTION
|
||||
|
||||
The registries configuration directory contains configuration for various registries
|
||||
(servers storing remote container images), and for content stored in them,
|
||||
so that the configuration does not have to be provided in command-line options over and over for every command,
|
||||
and so that it can be shared by all users of containers/image.
|
||||
|
||||
By default (unless overridden at compile-time), the registries configuration directory is `/etc/containers/registries.d`;
|
||||
applications may allow using a different directory instead.
|
||||
|
||||
## Directory Structure
|
||||
|
||||
The directory may contain any number of files with the extension `.yaml`,
|
||||
each using the YAML format. Other than the mandatory extension, names of the files
|
||||
don’t matter.
|
||||
|
||||
The contents of these files are merged together; to have a well-defined and easy to understand
|
||||
behavior, there can be only one configuration section describing a single namespace within a registry
|
||||
(in particular there can be at most one one `default-docker` section across all files,
|
||||
and there can be at most one instance of any key under the the `docker` section;
|
||||
these sections are documented later).
|
||||
|
||||
Thus, it is forbidden to have two conflicting configurations for a single registry or scope,
|
||||
and it is also forbidden to split a configuration for a single registry or scope across
|
||||
more than one file (even if they are not semantically in conflict).
|
||||
|
||||
## Registries, Scopes and Search Order
|
||||
|
||||
Each YAML file must contain a “YAML mapping” (key-value pairs). Two top-level keys are defined:
|
||||
|
||||
- `default-docker` is the _configuration section_ (as documented below)
|
||||
for registries implementing "Docker Registry HTTP API V2".
|
||||
|
||||
This key is optional.
|
||||
|
||||
- `docker` is a mapping, using individual registries implementing "Docker Registry HTTP API V2",
|
||||
or namespaces and individual images within these registries, as keys;
|
||||
the value assigned to any such key is a _configuration section_.
|
||||
|
||||
This key is optional.
|
||||
|
||||
Scopes matching individual images are named Docker references *in the fully expanded form*, either
|
||||
using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`).
|
||||
|
||||
More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest),
|
||||
a repository namespace, or a registry host (and a port if it differs from the default).
|
||||
|
||||
Note that if a registry is accessed using a hostname+port configuration, the port-less hostname
|
||||
is _not_ used as parent scope.
|
||||
|
||||
When searching for a configuration to apply for an individual container image, only
|
||||
the configuration for the most-precisely matching scope is used; configuration using
|
||||
more general scopes is ignored. For example, if _any_ configuration exists for
|
||||
`docker.io/library/busybox`, the configuration for `docker.io` is ignored
|
||||
(even if some element of the configuration is defined for `docker.io` and not for `docker.io/library/busybox`).
|
||||
|
||||
## Individual Configuration Sections
|
||||
|
||||
A single configuration section is selected for a container image using the process
|
||||
described above. The configuration section is a YAML mapping, with the following keys:
|
||||
|
||||
- `sigstore-staging` defines an URL of of the signature storage, used for editing it (adding or deleting signatures).
|
||||
|
||||
This key is optional; if it is missing, `sigstore` below is used.
|
||||
|
||||
- `sigstore` defines an URL of the signature storage.
|
||||
This URL is used for reading existing signatures,
|
||||
and if `sigstore-staging` does not exist, also for adding or removing them.
|
||||
|
||||
This key is optional; if it is missing, no signature storage is defined (no signatures
|
||||
are download along with images, adding new signatures is possible only if `sigstore-staging` is defined).
|
||||
|
||||
## Examples
|
||||
|
||||
### Using Containers from Various Origins
|
||||
|
||||
The following demonstrates how to to consume and run images from various registries and namespaces:
|
||||
|
||||
```yaml
|
||||
docker:
|
||||
registry.database-supplier.com:
|
||||
sigstore: https://sigstore.database-supplier.com
|
||||
distribution.great-middleware.org:
|
||||
sigstore: https://security-team.great-middleware.org/sigstore
|
||||
docker.io/web-framework:
|
||||
sigstore: https://sigstore.web-framework.io:8080
|
||||
```
|
||||
|
||||
### Developing and Signing Containers, Staging Signatures
|
||||
|
||||
For developers in `example.com`:
|
||||
|
||||
- Consume most container images using the public servers also used by clients.
|
||||
- Use a separate sigure storage for an container images in a namespace corresponding to the developers' department, with a staging storage used before publishing signatures.
|
||||
- Craft an individual exception for a single branch a specific developer is working on locally.
|
||||
|
||||
```yaml
|
||||
docker:
|
||||
registry.example.com:
|
||||
sigstore: https://registry-sigstore.example.com
|
||||
registry.example.com/mydepartment:
|
||||
sigstore: https://sigstore.mydepartment.example.com
|
||||
sigstore-staging: file:///mnt/mydepartment/sigstore-staging
|
||||
registry.example.com/mydepartment/myproject:mybranch:
|
||||
sigstore: http://localhost:4242/sigstore
|
||||
sigstore-staging: file:///home/useraccount/webroot/sigstore
|
||||
```
|
||||
|
||||
### A Global Default
|
||||
|
||||
If a company publishes its products using a different domain, and different registry hostname for each of them, it is still possible to use a single signature storage server
|
||||
without listing each domain individually. This is expected to rarely happen, usually only for staging new signatures.
|
||||
|
||||
```yaml
|
||||
default-docker:
|
||||
sigstore-staging: file:///mnt/company/common-sigstore-staging
|
||||
```
|
||||
|
||||
# AUTHORS
|
||||
|
||||
Miloslav Trmač <mitr@redhat.com>
|
||||
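A sketch of the most-specific-scope lookup the file describes: try the full reference, then successively shorter prefixes, and finally `default-docker`. The `section` type and helper are illustrative, not the actual containers/image data structures:

```go
// Sketch: most-specific-scope lookup for a fully expanded image reference.
package main

import (
	"fmt"
	"strings"
)

type section struct {
	Sigstore        string
	SigstoreStaging string
}

// lookup tries the full reference, then the repository (tag/digest stripped),
// then shorter namespace/host prefixes, and finally default-docker.
func lookup(docker map[string]section, defaultDocker *section, ref string) *section {
	scopes := []string{ref}
	repo := ref
	// Strip a trailing ":tag" or "@digest" if present after the last '/'.
	if i := strings.LastIndexAny(ref, ":@"); i > strings.LastIndex(ref, "/") {
		repo = ref[:i]
		scopes = append(scopes, repo)
	}
	for s := repo; strings.Contains(s, "/"); {
		s = s[:strings.LastIndex(s, "/")]
		scopes = append(scopes, s)
	}
	for _, s := range scopes {
		if sec, ok := docker[s]; ok {
			return &sec
		}
	}
	return defaultDocker
}

func main() {
	docker := map[string]section{
		"registry.example.com/mydepartment": {Sigstore: "https://sigstore.mydepartment.example.com"},
		"registry.example.com":              {Sigstore: "https://registry-sigstore.example.com"},
	}
	sec := lookup(docker, nil, "registry.example.com/mydepartment/myproject:mybranch")
	fmt.Println(sec.Sigstore) // https://sigstore.mydepartment.example.com
}
```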
136
vendor/github.com/containers/image/docs/signature-protocols.md
generated
vendored
Normal file
@@ -0,0 +1,136 @@
# Signature access protocols

The `github.com/containers/image` library supports signatures implemented as blobs “attached to” an image.
Some image transports (local storage formats and remote protocols) implement these signatures natively
or trivially; for others, the protocol extensions described below are necessary.

## docker/distribution registries—separate storage

### Usage

Any existing docker/distribution registry, whether or not it natively supports signatures,
can be augmented with separate signature storage by configuring a signature storage URL in [`registries.d`](registries.d.md).
`registries.d` can be configured to use one storage URL for a whole docker/distribution server,
or also separate URLs for smaller namespaces or individual repositories within the server
(which e.g. allows image authors to manage their own signature storage while publishing
the images on the public `docker.io` server).

The signature storage URL defines a root of a path hierarchy.
It can be either a `file:///…` URL, pointing to a local directory structure,
or a `http`/`https` URL, pointing to a remote server.
`file:///` signature storage can be both read and written; `http`/`https` only supports reading.

The same path hierarchy is used in both cases, so the HTTP/HTTPS server can be
a simple static web server serving a directory structure created by writing to a `file:///` signature storage.
(This of course does not prevent other server implementations,
e.g. an HTTP server reading signatures from a database.)

The usual workflow for producing and distributing images using the separate storage mechanism
is to configure the repository in `registries.d` with a `sigstore-staging` URL pointing to a private
`file:///` staging area, and a `sigstore` URL pointing to a public web server.
To publish an image, the image author would sign the image as necessary (e.g. using `skopeo copy`),
and then copy the created directory structure from the `file:///` staging area
to a subdirectory of a webroot of the public web server so that they are accessible using the public `sigstore` URL.
The author would also instruct consumers of the image to, or provide a `registries.d` configuration file to,
set up a `sigstore` URL pointing to the public web server.

### Path structure

Given a _base_ signature storage URL configured in `registries.d` as mentioned above,
and a container image stored in a docker/distribution registry using the _fully-expanded_ name
_hostname_`/`_namespaces_`/`_name_{`@`_digest_,`:`_tag_} (e.g. for `docker.io/library/busybox:latest`,
_namespaces_ is `library`, even if the user refers to the image using the shorter syntax as `busybox:latest`),
signatures are accessed using URLs of the form
> _base_`/`_namespaces_`/`_name_`@`_digest-algo_`=`_digest-value_`/signature-`_index_

where _digest-algo_`:`_digest-value_ is a manifest digest usable for referencing the relevant image manifest
(i.e. even if the user referenced the image using a tag,
the signature storage is always disambiguated using digest references).
Note that in the URLs used for signatures,
_digest-algo_ and _digest-value_ are separated using the `=` character,
not `:` like when accessing the manifest using the docker/distribution API.

Within the URL, _index_ is a decimal integer (in the canonical form), starting with 1.
Signatures are stored at URLs with successive _index_ values; to read all of them, start with _index_=1,
and continue reading signatures and increasing _index_ as long as signatures with these _index_ values exist.
Similarly, to add one more signature to an image, find the first _index_ which does not exist, and
then store the new signature using that _index_ value.

There is no way to list existing signatures other than iterating through the successive _index_ values,
and no way to download all of the signatures at once.

### Examples

For a docker/distribution image available as `busybox@sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e`
(or as `busybox:latest` if the `latest` tag points to a manifest with the same digest),
and with a `registries.d` configuration specifying a `sigstore` URL `https://example.com/sigstore` for the same image,
the following URLs would be accessed to download all signatures:
> - `https://example.com/sigstore/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-1`
> - `https://example.com/sigstore/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-2`
> - …

For a docker/distribution image available as `example.com/ns1/ns2/ns3/repo@somedigest:digestvalue` and the same
`sigstore` URL, the signatures would be available at
> `https://example.com/sigstore/ns1/ns2/ns3/repo@somedigest=digestvalue/signature-1`

and so on.
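A sketch of constructing these URLs in code, following the path structure above; the helper and its parameters are illustrative (real code builds the path from a parsed `github.com/containers/image/docker/reference`):

```go
// Sketch: building the sigstore URL for the n-th signature of an image.
package main

import (
	"fmt"
	"strings"
)

// signatureURL builds <base>/<namespaces>/<name>@<algo>=<value>/signature-<index>.
// repo is the fully expanded repository without the host, e.g. "library/busybox";
// digest is in the usual "sha256:..." form.
func signatureURL(base, repo, digest string, index int) string {
	return fmt.Sprintf("%s/%s@%s/signature-%d",
		strings.TrimSuffix(base, "/"),
		repo,
		strings.Replace(digest, ":", "=", 1), // '=' instead of ':' in sigstore paths
		index)
}

func main() {
	base := "https://example.com/sigstore"
	digest := "sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e"
	// Read signatures 1, 2, … until one is missing.
	for i := 1; i <= 2; i++ {
		fmt.Println(signatureURL(base, "library/busybox", digest, i))
	}
}
```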
## (OpenShift) docker/distribution API extension

As of https://github.com/openshift/origin/pull/12504/ , the OpenShift-embedded registry also provides
an extension of the docker/distribution API which allows simpler access to the signatures,
using only the docker/distribution API endpoint.

This API is not inherently OpenShift-specific (e.g. the client does not need to know the OpenShift API endpoint,
and credentials sufficient to access the docker/distribution API server are sufficient to access signatures as well),
and it is the preferred way to implement signature storage in registries.

See https://github.com/openshift/openshift-docs/pull/3556 for the upstream documentation of the API.

To read the signature, any user with access to an image can use the `/extensions/v2/…/signatures/…`
path to read an array of signatures. Use only the signature objects
which have `version` equal to `2`, `type` equal to `atomic`, and read the signature from `content`;
ignore the other fields of the signature object.

To add a single signature, `PUT` a new object with `version` set to `2`, `type` set to `atomic`,
and `content` set to the signature. Also set `name` to a unique name with the form
_digest_`@`_per-image-name_, where _digest_ is an image manifest digest (also used in the URL),
and _per-image-name_ is any unique identifier.

To add more than one signature, add them one at a time. This API does not allow deleting signatures.

Note that because signatures are stored within the cluster-wide image objects,
i.e. different namespaces can not associate different sets of signatures to the same image,
updating signatures requires cluster-wide access to the `imagesignatures` resource
(by default available to the `system:image-signer` role).

## OpenShift-embedded registries

The OpenShift-embedded registry implements the ordinary docker/distribution API,
and it also exposes images through the OpenShift REST API (available through the “API master” servers).

Note: OpenShift versions 1.5 and later support the above-described [docker/distribution API extension](#openshift-dockerdistribution-api-extension),
which is easier to set up and should usually be preferred.
Continue reading for details on using older versions of OpenShift.

As of https://github.com/openshift/origin/pull/9181,
signatures are exposed through the OpenShift API
(i.e. to access the complete image, it is necessary to use both APIs,
in particular to know the URLs for both the docker/distribution and the OpenShift API master endpoints).

To read the signature, any user with access to an image can use the `imagestreamimages` namespaced
resource to read an `Image` object and its `Signatures` array. Use only the `ImageSignature` objects
which have `Type` equal to `atomic`, and read the signature from `Content`; ignore the other fields of
the `ImageSignature` object.

To add or remove signatures, use the cluster-wide (non-namespaced) `imagesignatures` resource,
with `Type` set to `atomic` and `Content` set to the signature. Signature names must have the form
_digest_`@`_per-image-name_, where _digest_ is an image manifest digest (OpenShift “image name”),
and _per-image-name_ is any unique identifier.

Note that because signatures are stored within the cluster-wide image objects,
i.e. different namespaces can not associate different sets of signatures to the same image,
updating signatures requires cluster-wide access to the `imagesignatures` resource
(by default available to the `system:image-signer` role),
and deleting signatures is strongly discouraged
(it deletes the signature from all namespaces which contain the same image).
13
vendor/github.com/containers/image/ostree/ostree_src.go
generated
vendored
@@ -17,7 +17,7 @@ import (
 	"github.com/containers/image/types"
 	"github.com/containers/storage/pkg/ioutils"
 	"github.com/klauspost/pgzip"
-	"github.com/opencontainers/go-digest"
+	digest "github.com/opencontainers/go-digest"
 	glib "github.com/ostreedev/ostree-go/pkg/glibobject"
 	"github.com/pkg/errors"
 	"github.com/vbatts/tar-split/tar/asm"
@@ -313,24 +313,19 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, ca
 	if err != nil {
 		return nil, 0, err
 	}
-	defer mfz.Close()
 	metaUnpacker := storage.NewJSONUnpacker(mfz)
 
 	getter, err := newOSTreePathFileGetter(s.repo, branch)
 	if err != nil {
+		mfz.Close()
 		return nil, 0, err
 	}
 
 	ots := asm.NewOutputTarStream(getter, metaUnpacker)
 
-	pipeReader, pipeWriter := io.Pipe()
-	go func() {
-		io.Copy(pipeWriter, ots)
-		pipeWriter.Close()
-	}()
-
-	rc := ioutils.NewReadCloserWrapper(pipeReader, func() error {
+	rc := ioutils.NewReadCloserWrapper(ots, func() error {
 		getter.Close()
+		mfz.Close()
 		return ots.Close()
 	})
 	return rc, layerSize, nil
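The ostree change above drops an `io.Pipe` plus a copying goroutine in favor of handing the tar-split output stream to the caller directly, wrapped so that `Close` releases every underlying resource. A minimal, self-contained version of that wrapper pattern (the helper type here is illustrative; the real code uses containers/storage's `ioutils.NewReadCloserWrapper`):

```go
// Sketch: wrapping an io.Reader so that Close releases several resources,
// instead of pumping the stream through an io.Pipe in a goroutine.
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (w *readCloserWrapper) Close() error { return w.closer() }

// newReadCloserWrapper mirrors ioutils.NewReadCloserWrapper: reads come from r,
// Close runs the supplied cleanup.
func newReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
	return &readCloserWrapper{Reader: r, closer: closer}
}

func main() {
	underlying := strings.NewReader("layer bytes")
	rc := newReadCloserWrapper(underlying, func() error {
		// In the real GetBlob this closes the path getter, the gzip
		// reader and the tar-split output stream.
		fmt.Println("released underlying resources")
		return nil
	})
	data, _ := ioutil.ReadAll(rc)
	fmt.Printf("%s\n", data)
	rc.Close()
}
```

Besides saving a goroutine and a copy, this avoids the pipe's failure mode where an abandoned reader leaves the copying goroutine blocked forever.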
20
vendor/github.com/containers/image/pkg/blobinfocache/memory.go
generated
vendored
@@ -1,6 +1,7 @@
|
||||
package blobinfocache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/types"
|
||||
@@ -17,6 +18,7 @@ type locationKey struct {
|
||||
|
||||
// memoryCache implements an in-memory-only BlobInfoCache
|
||||
type memoryCache struct {
|
||||
mutex *sync.Mutex // synchronizes concurrent accesses
|
||||
uncompressedDigests map[digest.Digest]digest.Digest
|
||||
digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
@@ -28,6 +30,7 @@ type memoryCache struct {
// Manual users of types.{ImageSource,ImageDestination} might also use this instead of a persistent cache.
func NewMemoryCache() types.BlobInfoCache {
return &memoryCache{
mutex: new(sync.Mutex),
uncompressedDigests: map[digest.Digest]digest.Digest{},
digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
@@ -38,6 +41,15 @@ func NewMemoryCache() types.BlobInfoCache {
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
mem.mutex.Lock()
defer mem.mutex.Unlock()
return mem.uncompressedDigest(anyDigest)
}

// uncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (mem *memoryCache) uncompressedDigest(anyDigest digest.Digest) digest.Digest {
if d, ok := mem.uncompressedDigests[anyDigest]; ok {
return d
}
@@ -56,6 +68,8 @@ func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
mem.mutex.Lock()
defer mem.mutex.Unlock()
if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
}
@@ -72,6 +86,8 @@ func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
mem.mutex.Lock()
defer mem.mutex.Unlock()
key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
locationScope, ok := mem.knownLocations[key]
if !ok {
@@ -103,11 +119,13 @@ func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTime
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
mem.mutex.Lock()
defer mem.mutex.Unlock()
res := []candidateWithTime{}
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
var uncompressedDigest digest.Digest // = ""
if canSubstitute {
if uncompressedDigest = mem.UncompressedDigest(primaryDigest); uncompressedDigest != "" {
if uncompressedDigest = mem.uncompressedDigest(primaryDigest); uncompressedDigest != "" {
otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
for d := range otherDigests {
if d != primaryDigest && d != uncompressedDigest {
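The hunks above split UncompressedDigest into a locked public wrapper and an unlocked internal helper, so CandidateLocations can reuse the lookup while already holding the mutex. A minimal sketch of how a caller might exercise the cache; the digest literals are placeholders, not real blob digests:

package main

import (
	"fmt"

	"github.com/containers/image/pkg/blobinfocache"
	"github.com/opencontainers/go-digest"
)

func main() {
	cache := blobinfocache.NewMemoryCache()

	compressed := digest.Digest("sha256:1111")   // placeholder
	uncompressed := digest.Digest("sha256:2222") // placeholder

	// Record that the compressed blob decompresses to the uncompressed digest;
	// later copies can then substitute one variant of the blob for the other.
	cache.RecordDigestUncompressedPair(compressed, uncompressed)

	// The cache now answers the reverse lookup without decompressing anything.
	fmt.Println(cache.UncompressedDigest(compressed))
}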
23 vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go generated vendored
@@ -53,20 +53,23 @@ type Registry struct {
Prefix string `toml:"prefix"`
}

// backwards compatability to sysregistries v1
type v1TOMLregistries struct {
// V1TOMLregistries is for backwards compatibility to sysregistries v1
type V1TOMLregistries struct {
Registries []string `toml:"registries"`
}

// V1TOMLConfig is for backwards compatibility to sysregistries v1
type V1TOMLConfig struct {
Search V1TOMLregistries `toml:"search"`
Insecure V1TOMLregistries `toml:"insecure"`
Block V1TOMLregistries `toml:"block"`
}

// tomlConfig is the data type used to unmarshal the toml config.
type tomlConfig struct {
Registries []Registry `toml:"registry"`
// backwards compatability to sysregistries v1
V1Registries struct {
Search v1TOMLregistries `toml:"search"`
Insecure v1TOMLregistries `toml:"insecure"`
Block v1TOMLregistries `toml:"block"`
} `toml:"registries"`
V1TOMLConfig `toml:"registries"`
}

// InvalidRegistries represents invalid registry configurations. An example
@@ -129,21 +132,21 @@ func getV1Registries(config *tomlConfig) ([]Registry, error) {

// Note: config.V1Registries.Search needs to be processed first to ensure registryOrder is populated in the right order
// if one of the search registries is also in one of the other lists.
for _, search := range config.V1Registries.Search.Registries {
for _, search := range config.V1TOMLConfig.Search.Registries {
reg, err := getRegistry(search)
if err != nil {
return nil, err
}
reg.Search = true
}
for _, blocked := range config.V1Registries.Block.Registries {
for _, blocked := range config.V1TOMLConfig.Block.Registries {
reg, err := getRegistry(blocked)
if err != nil {
return nil, err
}
reg.Blocked = true
}
for _, insecure := range config.V1Registries.Insecure.Registries {
for _, insecure := range config.V1TOMLConfig.Insecure.Registries {
reg, err := getRegistry(insecure)
if err != nil {
return nil, err
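For reference, the v1 layout that V1TOMLConfig models is a set of plain registry lists under [registries.*] tables. A rough, self-contained sketch of decoding that shape; the struct mirrors here are local stand-ins for the vendored types, and BurntSushi/toml is assumed as the decoder:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type v1Registries struct {
	Registries []string `toml:"registries"`
}

type v1Config struct {
	Search   v1Registries `toml:"search"`
	Insecure v1Registries `toml:"insecure"`
	Block    v1Registries `toml:"block"`
}

type config struct {
	V1 v1Config `toml:"registries"` // embedded as V1TOMLConfig in the vendored code
}

const data = `
[registries.search]
registries = ['docker.io', 'quay.io']

[registries.insecure]
registries = ['localhost:5000']
`

func main() {
	var c config
	if _, err := toml.Decode(data, &c); err != nil {
		panic(err)
	}
	fmt.Println(c.V1.Search.Registries) // [docker.io quay.io]
}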
73 vendor/github.com/containers/image/storage/storage_image.go generated vendored
@@ -14,6 +14,7 @@ import (
"sync"
"sync/atomic"

"github.com/containers/image/docker/reference"
"github.com/containers/image/image"
"github.com/containers/image/internal/tmpdir"
"github.com/containers/image/manifest"
@@ -70,6 +71,13 @@ type storageImageCloser struct {
size int64
}

// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey
func manifestBigDataKey(digest digest.Digest) string {
return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
}

// newImageSource sets up an image for reading.
func newImageSource(imageRef storageReference) (*storageImageSource, error) {
// First, locate the image.
@@ -177,12 +185,29 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest)
return nil, "", ErrNoManifestLists
}
if len(s.cachedManifest) == 0 {
// We stored the manifest as an item named after storage.ImageDigestBigDataKey.
cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
if err != nil {
return nil, "", err
// The manifest is stored as a big data item.
// Prefer the manifest corresponding to the user-specified digest, if available.
if s.imageRef.named != nil {
if digested, ok := s.imageRef.named.(reference.Digested); ok {
key := manifestBigDataKey(digested.Digest())
blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
return nil, "", err
}
if err == nil {
s.cachedManifest = blob
}
}
}
// If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
// Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
if len(s.cachedManifest) == 0 {
cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
if err != nil {
return nil, "", err
}
s.cachedManifest = cachedBlob
}
s.cachedManifest = cachedBlob
}
return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
}
@@ -660,6 +685,7 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
}
lastLayer = layer.ID
}

// If one of those blobs was a configuration blob, then we can try to dig out the date when the image
// was originally created, in case we're just copying it. If not, no harm done.
options := &storage.ImageOptions{}
@@ -667,9 +693,6 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
logrus.Debugf("setting image creation date to %s", inspect.Created)
options.CreationDate = *inspect.Created
}
if manifestDigest, err := manifest.Digest(s.manifest); err == nil {
options.Digest = manifestDigest
}
// Create the image record, pointing to the most-recently added layer.
intendedID := s.imageRef.id
if intendedID == "" {
@@ -735,8 +758,20 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
}
logrus.Debugf("set names of image %q to %v", img.ID, names)
}
// Save the manifest. Use storage.ImageDigestBigDataKey as the item's
// name, so that its digest can be used to locate the image in the Store.
// Save the manifest. Allow looking it up by digest by using the key convention defined by the Store.
// Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
// and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
manifestDigest, err := manifest.Digest(s.manifest)
if err != nil {
return errors.Wrapf(err, "error computing manifest digest")
}
if err := s.imageRef.transport.store.SetImageBigData(img.ID, manifestBigDataKey(manifestDigest), s.manifest); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
return err
}
if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
@@ -788,9 +823,21 @@ func (s *storageImageDestination) SupportedManifestMIMETypes() []string {
}

// PutManifest writes the manifest to the destination.
func (s *storageImageDestination) PutManifest(ctx context.Context, manifest []byte) error {
s.manifest = make([]byte, len(manifest))
copy(s.manifest, manifest)
func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob []byte) error {
if s.imageRef.named != nil {
if digested, ok := s.imageRef.named.(reference.Digested); ok {
matches, err := manifest.MatchesDigest(manifestBlob, digested.Digest())
if err != nil {
return err
}
if !matches {
return fmt.Errorf("Manifest does not match expected digest %s", digested.Digest())
}
}
}

s.manifest = make([]byte, len(manifestBlob))
copy(s.manifest, manifestBlob)
return nil
}
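Putting the GetManifest change in words: the source now prefers the digest-specific big data item and falls back to the legacy key only when it is absent. A sketch of that lookup order, with loadManifest as a hypothetical helper built on the API shown above:

package example

import (
	"os"

	"github.com/containers/storage"
	"github.com/opencontainers/go-digest"
)

// loadManifest mirrors the lookup order above: digest-specific key first,
// legacy ImageDigestBigDataKey second.
func loadManifest(store storage.Store, imageID string, dgst digest.Digest) ([]byte, error) {
	// The same key convention as manifestBigDataKey.
	key := storage.ImageDigestManifestBigDataNamePrefix + "-" + dgst.String()
	blob, err := store.ImageBigData(imageID, key)
	if err == nil {
		return blob, nil
	}
	if !os.IsNotExist(err) { // os.IsNotExist means the image exists but lacks this item
		return nil, err
	}
	// Fall back for images stored before the digest-specific key existed.
	return store.ImageBigData(imageID, storage.ImageDigestBigDataKey)
}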
22 vendor/github.com/containers/image/storage/storage_reference.go generated vendored
@@ -55,7 +55,7 @@ func imageMatchesRepo(image *storage.Image, ref reference.Named) bool {
// one present with the same name or ID, and return the image.
func (s *storageReference) resolveImage() (*storage.Image, error) {
var loadedImage *storage.Image
if s.id == "" {
if s.id == "" && s.named != nil {
// Look for an image that has the expanded reference name as an explicit Name value.
image, err := s.transport.store.Image(s.named.String())
if image != nil && err == nil {
@@ -69,7 +69,7 @@ func (s *storageReference) resolveImage() (*storage.Image, error) {
// though possibly with a different tag or digest, as a Name value, so
// that the canonical reference can be implicitly resolved to the image.
images, err := s.transport.store.ImagesByDigest(digested.Digest())
if images != nil && err == nil {
if err == nil && len(images) > 0 {
for _, image := range images {
if imageMatchesRepo(image, s.named) {
loadedImage = image
@@ -97,6 +97,24 @@ func (s *storageReference) resolveImage() (*storage.Image, error) {
return nil, ErrNoSuchImage
}
}
// Default to having the image digest that we hand back match the most recently
// added manifest...
if digest, ok := loadedImage.BigDataDigests[storage.ImageDigestBigDataKey]; ok {
loadedImage.Digest = digest
}
// ... unless the named reference says otherwise, and it matches one of the digests
// in the image. For those cases, set the Digest field to that value, for the
// sake of older consumers that don't know there's a whole list in there now.
if s.named != nil {
if digested, ok := s.named.(reference.Digested); ok {
for _, digest := range loadedImage.Digests {
if digest == digested.Digest() {
loadedImage.Digest = digest
break
}
}
}
}
return loadedImage, nil
}
5 vendor/github.com/containers/image/storage/storage_transport.go generated vendored
@@ -284,11 +284,6 @@ func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference)
}
}
if sref, ok := ref.(*storageReference); ok {
if sref.id != "" {
if img, err := store.Image(sref.id); err == nil {
return img, nil
}
}
tmpRef := *sref
if img, err := tmpRef.resolveImage(); err == nil {
return img, nil
4 vendor/github.com/containers/image/vendor.conf generated vendored
@@ -28,7 +28,6 @@ golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8
golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08
gopkg.in/cheggaaa/pb.v1 v1.0.27
gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a
k8s.io/client-go bcde30fb7eaed76fd98a36b4120321b94995ffb6
github.com/xeipuuv/gojsonschema master
@@ -48,3 +47,6 @@ github.com/boltdb/bolt master
github.com/klauspost/pgzip v1.2.1
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0
github.com/vbauerster/mpb v3.3.4
github.com/mattn/go-isatty v0.0.4
github.com/VividCortex/ewma v1.1.1
4 vendor/github.com/containers/image/version/version.go generated vendored
@@ -8,10 +8,10 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 1
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
VersionPatch = 5

// VersionDev indicates development branch. Releases will be empty string.
VersionDev = "-dev"
VersionDev = ""
)

// Version is the specification version that the package types support.
3 vendor/github.com/containers/storage/containers.go generated vendored
@@ -568,6 +568,9 @@ func (r *containerStore) Lock() {
r.lockfile.Lock()
}

func (r *containerStore) RLock() {
r.lockfile.RLock()
}
func (r *containerStore) Unlock() {
r.lockfile.Unlock()
}
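The new RLock mirrors the existing Lock/Unlock pair, letting read-only callers share the lockfile instead of serializing on the exclusive lock. A small sketch of the intended split, against a hypothetical interface with just these three methods:

package example

type rwLocker interface {
	Lock()   // exclusive: required before modifying store contents
	RLock()  // shared: sufficient for lookups
	Unlock()
}

// readOnly runs fn while holding only the shared lock; concurrent readers
// are allowed, but writers taking Lock() are excluded for the duration.
func readOnly(l rwLocker, fn func() error) error {
	l.RLock()
	defer l.Unlock()
	return fn()
}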
5 vendor/github.com/containers/storage/drivers/aufs/aufs.go generated vendored
@@ -253,6 +253,11 @@ func (a *Driver) AdditionalImageStores() []string {
return nil
}

// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (a *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
return graphdriver.NaiveCreateFromTemplate(a, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite)
}

// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (a *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
5 vendor/github.com/containers/storage/drivers/btrfs/btrfs.go generated vendored
@@ -490,6 +490,11 @@ func (d *Driver) quotasDirID(id string) string {
return path.Join(d.quotasDir(), id)
}

// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
return d.Create(id, template, opts)
}

// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
5 vendor/github.com/containers/storage/drivers/devmapper/driver.go generated vendored
@@ -123,6 +123,11 @@ func (d *Driver) Cleanup() error {
return err
}

// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
return d.Create(id, template, opts)
}

// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
3 vendor/github.com/containers/storage/drivers/driver.go generated vendored
@@ -72,6 +72,9 @@ type ProtoDriver interface {
// specified id and parent and options passed in opts. Parent
// may be "" and opts may be nil.
Create(id, parent string, opts *CreateOpts) error
// CreateFromTemplate creates a new filesystem layer with the specified id
// and parent, with contents identical to the specified template layer.
CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *CreateOpts, readWrite bool) error
// Remove attempts to remove the filesystem layer with this id.
Remove(id string) error
// Get returns the mountpoint for the layered filesystem referred
66 vendor/github.com/containers/storage/drivers/overlay/check.go generated vendored
@@ -10,6 +10,8 @@ import (
"path/filepath"
"syscall"

"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/system"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -57,10 +59,11 @@ func doesSupportNativeDiff(d, mountOpts string) error {
}

opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", path.Join(td, "l2"), path.Join(td, "l1"), path.Join(td, "l3"), path.Join(td, "work"))
if mountOpts != "" {
opts = fmt.Sprintf("%s,%s", opts, mountOpts)
flags, data := mount.ParseOptions(mountOpts)
if data != "" {
opts = fmt.Sprintf("%s,%s", opts, data)
}
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", 0, opts); err != nil {
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil {
return errors.Wrap(err, "failed to mount overlay")
}
defer func() {
@@ -103,3 +106,60 @@ func doesSupportNativeDiff(d, mountOpts string) error {

return nil
}

// doesMetacopy checks if the filesystem is going to optimize changes to
// metadata by using nodes marked with an "overlay.metacopy" attribute to avoid
// copying up a file from a lower layer unless/until its contents are being
// modified
func doesMetacopy(d, mountOpts string) (bool, error) {
td, err := ioutil.TempDir(d, "metacopy-check")
if err != nil {
return false, err
}
defer func() {
if err := os.RemoveAll(td); err != nil {
logrus.Warnf("Failed to remove check directory %v: %v", td, err)
}
}()

// Make directories l1, l2, work, merged
if err := os.MkdirAll(filepath.Join(td, "l1"), 0755); err != nil {
return false, err
}
if err := ioutils.AtomicWriteFile(filepath.Join(td, "l1", "f"), []byte{0xff}, 0700); err != nil {
return false, err
}
if err := os.MkdirAll(filepath.Join(td, "l2"), 0755); err != nil {
return false, err
}
if err := os.Mkdir(filepath.Join(td, "work"), 0755); err != nil {
return false, err
}
if err := os.Mkdir(filepath.Join(td, "merged"), 0755); err != nil {
return false, err
}
// Mount using the mandatory options and configured options
opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", path.Join(td, "l1"), path.Join(td, "l2"), path.Join(td, "work"))
flags, data := mount.ParseOptions(mountOpts)
if data != "" {
opts = fmt.Sprintf("%s,%s", opts, data)
}
if err := unix.Mount("overlay", filepath.Join(td, "merged"), "overlay", uintptr(flags), opts); err != nil {
return false, errors.Wrap(err, "failed to mount overlay for metacopy check")
}
defer func() {
if err := unix.Unmount(filepath.Join(td, "merged"), 0); err != nil {
logrus.Warnf("Failed to unmount check directory %v: %v", filepath.Join(td, "merged"), err)
}
}()
// Make a change that only impacts the inode, and check if the pulled-up copy is marked
// as a metadata-only copy
if err := os.Chmod(filepath.Join(td, "merged", "f"), 0600); err != nil {
return false, errors.Wrap(err, "error changing permissions on file for metacopy check")
}
metacopy, err := system.Lgetxattr(filepath.Join(td, "l2", "f"), "trusted.overlay.metacopy")
if err != nil {
return false, errors.Wrap(err, "metacopy flag was not set on file in upper layer")
}
return metacopy != nil, nil
}
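Both hunks above replace the old string concatenation with mount.ParseOptions, because flag-type options must reach mount(2) through the flags argument rather than the data string. A quick illustration; the option string is an arbitrary example:

package main

import (
	"fmt"

	"github.com/containers/storage/pkg/mount"
)

func main() {
	// "noatime" is a flag and becomes MS_NOATIME; "metacopy=on" has no flag
	// equivalent, so it stays in the returned data string.
	flags, data := mount.ParseOptions("noatime,metacopy=on")
	fmt.Printf("flags=%#x data=%q\n", flags, data)
}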
57 vendor/github.com/containers/storage/drivers/overlay/overlay.go generated vendored
@@ -14,6 +14,7 @@ import (
"strconv"
"strings"
"sync"
"syscall"

"github.com/containers/storage/drivers"
"github.com/containers/storage/drivers/overlayutils"
@@ -84,13 +85,12 @@ const (
)

type overlayOptions struct {
overrideKernelCheck bool
imageStores []string
quota quota.Quota
mountProgram string
ostreeRepo string
skipMountHome bool
mountOptions string
imageStores []string
quota quota.Quota
mountProgram string
ostreeRepo string
skipMountHome bool
mountOptions string
}

// Driver contains information about the home directory and the list of active mounts that are created using this driver.
@@ -104,6 +104,7 @@ type Driver struct {
options overlayOptions
naiveDiff graphdriver.DiffDriver
supportsDType bool
usingMetacopy bool
locker *locker.Locker
convert map[string]bool
}
@@ -157,6 +158,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
return nil, err
}

var usingMetacopy bool
var supportsDType bool
if opts.mountProgram != "" {
supportsDType = true
@@ -165,8 +167,23 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
if err != nil {
os.Remove(filepath.Join(home, linkDir))
os.Remove(home)
patherr, ok := err.(*os.PathError)
if ok && patherr.Err == syscall.ENOSPC {
return nil, err
}
return nil, errors.Wrap(err, "kernel does not support overlay fs")
}
usingMetacopy, err = doesMetacopy(home, opts.mountOptions)
if err == nil {
if usingMetacopy {
logrus.Debugf("overlay test mount indicated that metacopy is being used")
} else {
logrus.Debugf("overlay test mount indicated that metacopy is not being used")
}
} else {
logrus.Warnf("overlay test mount did not indicate whether or not metacopy is being used: %v", err)
return nil, err
}
}

if !opts.skipMountHome {
@@ -188,6 +205,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
gidMaps: gidMaps,
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
supportsDType: supportsDType,
usingMetacopy: usingMetacopy,
locker: locker.New(),
options: *opts,
convert: make(map[string]bool),
@@ -207,7 +225,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
return nil, fmt.Errorf("Storage option overlay.size only supported for backingFS XFS. Found %v", backingFs)
}

logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v", backingFs, projectQuotaSupported, !d.useNaiveDiff())
logrus.Debugf("backingFs=%s, projectQuotaSupported=%v, useNativeDiff=%v, usingMetacopy=%v", backingFs, projectQuotaSupported, !d.useNaiveDiff(), d.usingMetacopy)

return d, nil
}
@@ -222,11 +240,7 @@ func parseOptions(options []string) (*overlayOptions, error) {
key = strings.ToLower(key)
switch key {
case ".override_kernel_check", "overlay.override_kernel_check", "overlay2.override_kernel_check":
logrus.Debugf("overlay: override_kernelcheck=%s", val)
o.overrideKernelCheck, err = strconv.ParseBool(val)
if err != nil {
return nil, err
}
logrus.Warnf("overlay: override_kernel_check option was specified, but is no longer necessary")
case ".mountopt", "overlay.mountopt", "overlay2.mountopt":
o.mountOptions = val
case ".size", "overlay.size", "overlay2.size":
@@ -285,6 +299,12 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int)
exec.Command("modprobe", "overlay").Run()

layerDir, err := ioutil.TempDir(home, "compat")
if err != nil {
patherr, ok := err.(*os.PathError)
if ok && patherr.Err == syscall.ENOSPC {
return false, err
}
}
if err == nil {
// Check if reading the directory's contents populates the d_type field, which is required
// for proper operation of the overlay filesystem.
@@ -364,6 +384,7 @@ func (d *Driver) Status() [][2]string {
{"Backing Filesystem", backingFs},
{"Supports d_type", strconv.FormatBool(d.supportsDType)},
{"Native Overlay Diff", strconv.FormatBool(!d.useNaiveDiff())},
{"Using metacopy", strconv.FormatBool(d.usingMetacopy)},
}
}

@@ -399,6 +420,14 @@ func (d *Driver) Cleanup() error {
return mount.Unmount(d.home)
}

// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
if readWrite {
return d.CreateReadWrite(id, template, opts)
}
return d.Create(id, template, opts)
}

// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
@@ -782,6 +811,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts)
mountTarget = path.Join(id, "merged")
}
flags, data := mount.ParseOptions(mountData)
logrus.Debugf("overlay: mount_data=%s", mountData)
if err := mountFunc("overlay", mountTarget, "overlay", uintptr(flags), data); err != nil {
return "", fmt.Errorf("error creating overlay mount to %s: %v", mountTarget, err)
}
@@ -975,6 +1005,7 @@ func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings
// Mount the new layer and handle ownership changes and possible copy_ups in it.
options := graphdriver.MountOpts{
MountLabel: mountLabel,
Options: strings.Split(d.options.mountOptions, ","),
}
layerFs, err := d.get(id, true, options)
if err != nil {
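Configured mount options now flow from parseOptions into both the probe mounts and the real mounts. A sketch of initializing the driver with a mountopt, assuming root privileges and a writable home path; the values shown are placeholders:

package main

import (
	"log"

	"github.com/containers/storage/drivers/overlay"
)

func main() {
	d, err := overlay.Init("/var/lib/containers/storage/overlay",
		[]string{"overlay.mountopt=nodev"}, nil, nil)
	if err != nil {
		log.Fatal(err) // e.g. "kernel does not support overlay fs"
	}
	defer d.Cleanup()
	// Status now also reports the result of the metacopy probe.
	log.Println(d.Status())
}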
45 vendor/github.com/containers/storage/drivers/template.go generated vendored Normal file
@@ -0,0 +1,45 @@
package graphdriver

import (
"github.com/sirupsen/logrus"

"github.com/containers/storage/pkg/idtools"
)

// TemplateDriver is just barely enough of a driver that we can implement a
// naive version of CreateFromTemplate on top of it.
type TemplateDriver interface {
DiffDriver
CreateReadWrite(id, parent string, opts *CreateOpts) error
Create(id, parent string, opts *CreateOpts) error
Remove(id string) error
}

// CreateFromTemplate creates a layer with the same contents and parent as
// another layer. Internally, it may even depend on that other layer
// continuing to exist, as if it were actually a child of the child layer.
func NaiveCreateFromTemplate(d TemplateDriver, id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *CreateOpts, readWrite bool) error {
var err error
if readWrite {
err = d.CreateReadWrite(id, parent, opts)
} else {
err = d.Create(id, parent, opts)
}
if err != nil {
return err
}
diff, err := d.Diff(template, templateIDMappings, parent, parentIDMappings, opts.MountLabel)
if err != nil {
if err2 := d.Remove(id); err2 != nil {
logrus.Errorf("error removing layer %q: %v", id, err2)
}
return err
}
if _, err = d.ApplyDiff(id, templateIDMappings, parent, opts.MountLabel, diff); err != nil {
if err2 := d.Remove(id); err2 != nil {
logrus.Errorf("error removing layer %q: %v", id, err2)
}
return err
}
return nil
}
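Drivers without a cheaper native path (aufs above, windows below) satisfy the new ProtoDriver method by delegating to this helper. A fragment showing the delegation, assuming a hypothetical myDriver that already implements graphdriver.TemplateDriver:

// CreateFromTemplate creates a layer with the same contents and parent as
// another layer by replaying the template's diff onto a fresh layer.
func (d *myDriver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
	// NaiveCreateFromTemplate removes the half-created layer again if the
	// Diff or ApplyDiff step fails.
	return graphdriver.NaiveCreateFromTemplate(d, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite)
}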
8 vendor/github.com/containers/storage/drivers/vfs/driver.go generated vendored
@@ -99,6 +99,14 @@ func (d *Driver) Cleanup() error {
return nil
}

// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
if readWrite {
return d.CreateReadWrite(id, template, opts)
}
return d.Create(id, template, opts)
}

// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
5 vendor/github.com/containers/storage/drivers/windows/windows.go generated vendored
@@ -185,6 +185,11 @@ func (d *Driver) Exists(id string) bool {
return result
}

// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
return graphdriver.NaiveCreateFromTemplate(d, id, template, templateIDMappings, parent, parentIDMappings, opts, readWrite)
}

// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
67 vendor/github.com/containers/storage/drivers/zfs/zfs.go generated vendored
@@ -1,4 +1,4 @@
// +build linux freebsd solaris
// +build linux freebsd

package zfs

@@ -16,7 +16,7 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
zfs "github.com/mistifyio/go-zfs"
"github.com/mistifyio/go-zfs"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -38,7 +38,7 @@ type Logger struct{}

// Log wraps log message from ZFS driver with a prefix '[zfs]'.
func (*Logger) Log(cmd []string) {
logrus.Debugf("[zfs] %s", strings.Join(cmd, " "))
logrus.WithField("storage-driver", "zfs").Debugf("%s", strings.Join(cmd, " "))
}

// Init returns a new ZFS driver.
@@ -47,14 +47,16 @@ func (*Logger) Log(cmd []string) {
func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
var err error

logger := logrus.WithField("storage-driver", "zfs")

if _, err := exec.LookPath("zfs"); err != nil {
logrus.Debugf("[zfs] zfs command is not available: %v", err)
logger.Debugf("zfs command is not available: %v", err)
return nil, errors.Wrap(graphdriver.ErrPrerequisites, "the 'zfs' command is not available")
}

file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 0600)
if err != nil {
logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err)
logger.Debugf("cannot open /dev/zfs: %v", err)
return nil, errors.Wrapf(graphdriver.ErrPrerequisites, "could not open /dev/zfs: %v", err)
}
defer file.Close()
@@ -109,9 +111,6 @@ func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
return nil, fmt.Errorf("Failed to create '%s': %v", base, err)
}

if err := mount.MakePrivate(base); err != nil {
return nil, err
}
d := &Driver{
dataset: rootDataset,
options: options,
@@ -157,7 +156,7 @@ func lookupZfsDataset(rootdir string) (string, error) {
}
for _, m := range mounts {
if err := unix.Stat(m.Mountpoint, &stat); err != nil {
logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err)
logrus.WithField("storage-driver", "zfs").Debugf("failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err)
continue // may fail on fuse file systems
}

@@ -184,7 +183,7 @@ func (d *Driver) String() string {
return "zfs"
}

// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver.
// Cleanup is called on when program exits, it is a no-op for ZFS.
func (d *Driver) Cleanup() error {
return nil
}
@@ -260,6 +259,11 @@ func (d *Driver) mountPath(id string) string {
return path.Join(d.options.mountPath, "graph", getMountpoint(id))
}

// CreateFromTemplate creates a layer with the same contents and parent as another layer.
func (d *Driver) CreateFromTemplate(id, template string, templateIDMappings *idtools.IDMappings, parent string, parentIDMappings *idtools.IDMappings, opts *graphdriver.CreateOpts, readWrite bool) error {
return d.Create(id, template, opts)
}

// CreateReadWrite creates a layer that is writable for use as a container
// file system.
func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
@@ -360,11 +364,25 @@ func (d *Driver) Remove(id string) error {
}

// Get returns the mountpoint for the given id after creating the target directories if necessary.
func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
func (d *Driver) Get(id string, options graphdriver.MountOpts) (_ string, retErr error) {

mountpoint := d.mountPath(id)
if count := d.ctr.Increment(mountpoint); count > 1 {
return mountpoint, nil
}
defer func() {
if retErr != nil {
if c := d.ctr.Decrement(mountpoint); c <= 0 {
if mntErr := unix.Unmount(mountpoint, 0); mntErr != nil {
logrus.WithField("storage-driver", "zfs").Errorf("Error unmounting %v: %v", mountpoint, mntErr)
}
if rmErr := unix.Rmdir(mountpoint); rmErr != nil && !os.IsNotExist(rmErr) {
logrus.WithField("storage-driver", "zfs").Debugf("Failed to remove %s: %v", id, rmErr)
}

}
}
}()

mountOptions := d.options.mountOptions
if len(options.Options) > 0 {
@@ -373,29 +391,24 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {

filesystem := d.zfsPath(id)
opts := label.FormatMountLabel(mountOptions, options.MountLabel)
logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, opts)
logrus.WithField("storage-driver", "zfs").Debugf(`mount("%s", "%s", "%s")`, filesystem, mountpoint, opts)

rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
if err != nil {
d.ctr.Decrement(mountpoint)
return "", err
}
// Create the target directories if they don't exist
if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil {
d.ctr.Decrement(mountpoint)
return "", err
}

if err := mount.Mount(filesystem, mountpoint, "zfs", opts); err != nil {
d.ctr.Decrement(mountpoint)
return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err)
return "", errors.Wrap(err, "error creating zfs mount")
}

// this could be our first mount after creation of the filesystem, and the root dir may still have root
// permissions instead of the remapped root uid:gid (if user namespaces are enabled):
if err := os.Chown(mountpoint, rootUID, rootGID); err != nil {
mount.Unmount(mountpoint)
d.ctr.Decrement(mountpoint)
return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err)
}

@@ -408,16 +421,18 @@ func (d *Driver) Put(id string) error {
if count := d.ctr.Decrement(mountpoint); count > 0 {
return nil
}
mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint)
if err != nil || !mounted {
return err

logger := logrus.WithField("storage-driver", "zfs")

logger.Debugf(`unmount("%s")`, mountpoint)

if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil {
logger.Warnf("Failed to unmount %s mount %s: %v", id, mountpoint, err)
}
if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
logger.Debugf("Failed to remove %s mount point %s: %v", id, mountpoint, err)
}

logrus.Debugf(`[zfs] unmount("%s")`, mountpoint)

if err := mount.Unmount(mountpoint); err != nil {
return fmt.Errorf("error unmounting to %s: %v", mountpoint, err)
}
return nil
}
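The reworked Get shows a common Go idiom: name the error return so a single deferred closure can undo the refcount and any partial mount on every failure path. A stripped-down standalone illustration, with acquire/release as stand-ins for the Increment and Unmount/Rmdir steps:

package main

import (
	"errors"
	"fmt"
)

func get(acquire func() error, setup func() error, release func()) (_ string, retErr error) {
	if err := acquire(); err != nil {
		return "", err
	}
	defer func() {
		if retErr != nil { // runs for every early error return below
			release()
		}
	}()
	if err := setup(); err != nil {
		return "", err // the deferred cleanup fires automatically
	}
	return "mountpoint", nil
}

func main() {
	_, err := get(
		func() error { return nil },
		func() error { return errors.New("mount failed") },
		func() { fmt.Println("released") },
	)
	fmt.Println(err)
}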
2 vendor/github.com/containers/storage/drivers/zfs/zfs_freebsd.go generated vendored
@@ -18,7 +18,7 @@ func checkRootdirFs(rootdir string) error {

// on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ]
if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) {
logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
logrus.WithField("storage-driver", "zfs").Debugf("no zfs dataset found for rootdir '%s'", rootdir)
return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir)
}
21 vendor/github.com/containers/storage/drivers/zfs/zfs_linux.go generated vendored
@@ -1,23 +1,24 @@
package zfs

import (
"fmt"

"github.com/containers/storage/drivers"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)

func checkRootdirFs(rootdir string) error {
var buf unix.Statfs_t
if err := unix.Statfs(rootdir, &buf); err != nil {
return fmt.Errorf("Failed to access '%s': %s", rootdir, err)
func checkRootdirFs(rootDir string) error {
fsMagic, err := graphdriver.GetFSMagic(rootDir)
if err != nil {
return err
}
backingFS := "unknown"
if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
backingFS = fsName
}

if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs {
logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir)
if fsMagic != graphdriver.FsMagicZfs {
logrus.WithField("root", rootDir).WithField("backingFS", backingFS).WithField("storage-driver", "zfs").Error("No zfs dataset found for root")
return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootDir)
}

return nil
59 vendor/github.com/containers/storage/drivers/zfs/zfs_solaris.go generated vendored
@@ -1,59 +0,0 @@
// +build solaris,cgo

package zfs

/*
#include <sys/statvfs.h>
#include <stdlib.h>

static inline struct statvfs *getstatfs(char *s) {
struct statvfs *buf;
int err;
buf = (struct statvfs *)malloc(sizeof(struct statvfs));
err = statvfs(s, buf);
return buf;
}
*/
import "C"
import (
"path/filepath"
"strings"
"unsafe"

"github.com/containers/storage/drivers"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

func checkRootdirFs(rootdir string) error {

cs := C.CString(filepath.Dir(rootdir))
defer C.free(unsafe.Pointer(cs))
buf := C.getstatfs(cs)
defer C.free(unsafe.Pointer(buf))

// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
(buf.f_basetype[3] != 0) {
logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
return errors.Wrapf(graphdriver.ErrPrerequisites, "no zfs dataset found for rootdir '%s'", rootdir)
}

return nil
}

/* rootfs is introduced to comply with the OCI spec
which states that root filesystem must be mounted at <CID>/rootfs/ instead of <CID>/
*/
func getMountpoint(id string) string {
maxlen := 12

// we need to preserve filesystem suffix
suffix := strings.SplitN(id, "-", 2)

if len(suffix) > 1 {
return filepath.Join(id[:maxlen]+"-"+suffix[1], "rootfs", "root")
}

return filepath.Join(id[:maxlen], "rootfs", "root")
}
2 vendor/github.com/containers/storage/drivers/zfs/zfs_unsupported.go generated vendored
@@ -1,4 +1,4 @@
// +build !linux,!freebsd,!solaris
// +build !linux,!freebsd

package zfs
2 vendor/github.com/containers/storage/errors.go generated vendored
@@ -43,8 +43,6 @@ var (
ErrSizeUnknown = errors.New("size is not known")
// ErrStoreIsReadOnly is returned when the caller makes a call to a read-only store that would require modifying its contents.
ErrStoreIsReadOnly = errors.New("called a write method on a read-only store")
// ErrLockReadOnly indicates that the caller only took a read-only lock, and is not allowed to write.
ErrLockReadOnly = errors.New("lock is not a read-write lock")
// ErrDuplicateImageNames indicates that the read-only store uses the same name for multiple images.
ErrDuplicateImageNames = errors.New("read-only image store assigns the same name to multiple images")
// ErrDuplicateLayerNames indicates that the read-only store uses the same name for multiple layers.
205 vendor/github.com/containers/storage/images.go generated vendored
@@ -5,8 +5,10 @@ import (
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"

"github.com/containers/image/manifest"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/pkg/stringid"
"github.com/containers/storage/pkg/truncindex"
@@ -15,9 +17,13 @@ import (
)

const (
// ImageDigestBigDataKey is the name of the big data item whose
// contents we consider useful for computing a "digest" of the
// image, by which we can locate the image later.
// ImageDigestManifestBigDataNamePrefix is a prefix of big data item
// names which we consider to be manifests, used for computing a
// "digest" value for the image as a whole, by which we can locate the
// image later.
ImageDigestManifestBigDataNamePrefix = "manifest"
// ImageDigestBigDataKey is provided for compatibility with older
// versions of the image library. It will be removed in the future.
ImageDigestBigDataKey = "manifest"
)

@@ -27,12 +33,19 @@ type Image struct {
// value which was generated by the library.
ID string `json:"id"`

// Digest is a digest value that we can use to locate the image.
// Digest is a digest value that we can use to locate the image, if one
// was specified at creation-time.
Digest digest.Digest `json:"digest,omitempty"`

// Digests is a list of digest values of the image's manifests, and
// possibly a manually-specified value, that we can use to locate the
// image. If Digest is set, its value is also in this list.
Digests []digest.Digest `json:"-"`

// Names is an optional set of user-defined convenience values. The
// image can be referred to by its ID or any of its names. Names are
// unique among images.
// unique among images, and are often the text representation of tagged
// or canonical references.
Names []string `json:"names,omitempty"`

// TopLayer is the ID of the topmost layer of the image itself, if the
@@ -42,7 +55,9 @@ type Image struct {

// MappedTopLayers are the IDs of alternate versions of the top layer
// which have the same contents and parent, and which differ from
// TopLayer only in which ID mappings they use.
// TopLayer only in which ID mappings they use. When the image is
// to be removed, they should be removed before the TopLayer, as the
// graph driver may depend on that.
MappedTopLayers []string `json:"mapped-layers,omitempty"`

// Metadata is data we keep for the convenience of the caller. It is not
@@ -90,8 +105,10 @@ type ROImageStore interface {
// Images returns a slice enumerating the known images.
Images() ([]Image, error)

// Images returns a slice enumerating the images which have a big data
// item with the name ImageDigestBigDataKey and the specified digest.
// ByDigest returns a slice enumerating the images which have either an
// explicitly-set digest, or a big data item with a name that starts
// with ImageDigestManifestBigDataNamePrefix, which matches the
// specified digest.
ByDigest(d digest.Digest) ([]*Image, error)
}

@@ -109,7 +126,8 @@ type ImageStore interface {
Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error)

// SetNames replaces the list of names associated with an image with the
// supplied values.
// supplied values. The values are expected to be valid normalized
// named image references.
SetNames(id string, names []string) error

// Delete removes the record of the image.
@@ -133,6 +151,7 @@ func copyImage(i *Image) *Image {
return &Image{
ID: i.ID,
Digest: i.Digest,
Digests: copyDigestSlice(i.Digests),
Names: copyStringSlice(i.Names),
TopLayer: i.TopLayer,
MappedTopLayers: copyStringSlice(i.MappedTopLayers),
@@ -145,6 +164,17 @@ func copyImage(i *Image) *Image {
}
}

func copyImageSlice(slice []*Image) []*Image {
if len(slice) > 0 {
cp := make([]*Image, len(slice))
for i := range slice {
cp[i] = copyImage(slice[i])
}
return cp
}
return nil
}

func (r *imageStore) Images() ([]Image, error) {
images := make([]Image, len(r.images))
for i := range r.images {
@@ -165,6 +195,46 @@ func (r *imageStore) datapath(id, key string) string {
return filepath.Join(r.datadir(id), makeBigDataBaseName(key))
}

// bigDataNameIsManifest determines if a big data item with the specified name
// is considered to be representative of the image, in that its digest can be
// said to also be the image's digest. Currently, if its name is, or begins
// with, "manifest", we say that it is.
func bigDataNameIsManifest(name string) bool {
return strings.HasPrefix(name, ImageDigestManifestBigDataNamePrefix)
}

// recomputeDigests takes a fixed digest and a name-to-digest map and builds a
// list of the unique values that would identify the image.
func (image *Image) recomputeDigests() error {
validDigests := make([]digest.Digest, 0, len(image.BigDataDigests)+1)
digests := make(map[digest.Digest]struct{})
if image.Digest != "" {
if err := image.Digest.Validate(); err != nil {
return errors.Wrapf(err, "error validating image digest %q", string(image.Digest))
}
digests[image.Digest] = struct{}{}
validDigests = append(validDigests, image.Digest)
}
for name, digest := range image.BigDataDigests {
if !bigDataNameIsManifest(name) {
continue
}
if digest.Validate() != nil {
return errors.Wrapf(digest.Validate(), "error validating digest %q for big data item %q", string(digest), name)
}
// Deduplicate the digest values.
if _, known := digests[digest]; !known {
digests[digest] = struct{}{}
validDigests = append(validDigests, digest)
}
}
if image.Digest == "" && len(validDigests) > 0 {
image.Digest = validDigests[0]
}
image.Digests = validDigests
return nil
}
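Since recomputeDigests is unexported, here is an illustrative standalone mirror of its deduplication rule: the pinned Digest (if any) comes first, followed by each distinct digest of a big data item whose name starts with the manifest prefix. The sample digests are placeholders:

package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/go-digest"
)

func recompute(pinned digest.Digest, bigData map[string]digest.Digest) []digest.Digest {
	seen := map[digest.Digest]struct{}{}
	out := []digest.Digest{}
	if pinned != "" {
		seen[pinned] = struct{}{}
		out = append(out, pinned)
	}
	for name, d := range bigData {
		if !strings.HasPrefix(name, "manifest") { // ImageDigestManifestBigDataNamePrefix
			continue
		}
		if _, dup := seen[d]; !dup {
			seen[d] = struct{}{}
			out = append(out, d)
		}
	}
	return out
}

func main() {
	a := digest.Digest("sha256:aaa0") // placeholder values, not valid digests
	b := digest.Digest("sha256:bbb0")
	fmt.Println(recompute(a, map[string]digest.Digest{
		"manifest":             a, // duplicate of the pinned digest, dropped
		"manifest-sha256:bbb0": b,
		"signatures":           digest.Digest("sha256:ccc0"), // not a manifest item, ignored
	}))
}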
func (r *imageStore) Load() error {
shouldSave := false
rpath := r.imagespath()
@@ -187,21 +257,22 @@ func (r *imageStore) Load() error {
r.removeName(conflict, name)
shouldSave = true
}
names[name] = images[n]
}
// Implicit digest
if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
digests[digest] = append(digests[digest], images[n])
// Compute the digest list.
err = image.recomputeDigests()
if err != nil {
return errors.Wrapf(err, "error computing digests for image with ID %q (%v)", image.ID, image.Names)
}
// Explicit digest
if image.Digest == "" {
image.Digest = image.BigDataDigests[ImageDigestBigDataKey]
} else if image.Digest != image.BigDataDigests[ImageDigestBigDataKey] {
digests[image.Digest] = append(digests[image.Digest], images[n])
for _, name := range image.Names {
names[name] = image
}
for _, digest := range image.Digests {
list := digests[digest]
digests[digest] = append(list, image)
}
}
}
if shouldSave && !r.IsReadWrite() {
if shouldSave && (!r.IsReadWrite() || !r.Locked()) {
return ErrDuplicateImageNames
}
r.images = images
@@ -220,7 +291,7 @@ func (r *imageStore) Save() error {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the image store at %q", r.imagespath())
}
if !r.Locked() {
return errors.New("image store is not locked")
return errors.New("image store is not locked for writing")
}
rpath := r.imagespath()
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
@@ -331,12 +402,12 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest)
}
}
if _, idInUse := r.byid[id]; idInUse {
return nil, ErrDuplicateID
return nil, errors.Wrapf(ErrDuplicateID, "an image with ID %q already exists", id)
}
names = dedupeNames(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
return nil, ErrDuplicateName
if image, nameInUse := r.byname[name]; nameInUse {
return nil, errors.Wrapf(ErrDuplicateName, "image name %q is already associated with image %q", name, image.ID)
}
}
if created.IsZero() {
@@ -346,6 +417,7 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest)
image = &Image{
ID: id,
Digest: searchableDigest,
Digests: nil,
Names: names,
TopLayer: layer,
Metadata: metadata,
@@ -355,16 +427,20 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest)
Created: created,
Flags: make(map[string]interface{}),
}
err := image.recomputeDigests()
if err != nil {
return nil, errors.Wrapf(err, "error validating digests for new image")
}
r.images = append(r.images, image)
r.idindex.Add(id)
r.byid[id] = image
if searchableDigest != "" {
list := r.bydigest[searchableDigest]
r.bydigest[searchableDigest] = append(list, image)
}
for _, name := range names {
r.byname[name] = image
}
for _, digest := range image.Digests {
list := r.bydigest[digest]
r.bydigest[digest] = append(list, image)
}
err = r.Save()
image = copyImage(image)
}
@@ -442,6 +518,14 @@ func (r *imageStore) Delete(id string) error {
for _, name := range image.Names {
delete(r.byname, name)
}
for _, digest := range image.Digests {
prunedList := imageSliceWithoutValue(r.bydigest[digest], image)
if len(prunedList) == 0 {
delete(r.bydigest, digest)
} else {
r.bydigest[digest] = prunedList
}
}
if toDeleteIndex != -1 {
// delete the image at toDeleteIndex
if toDeleteIndex == len(r.images)-1 {
@@ -450,28 +534,6 @@ func (r *imageStore) Delete(id string) error {
r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...)
}
}
if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok {
// remove the image from the digest-based index
if list, ok := r.bydigest[digest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, digest)
} else {
r.bydigest[digest] = prunedList
}
}
}
if image.Digest != "" {
// remove the image's hard-coded digest from the digest-based index
if list, ok := r.bydigest[image.Digest]; ok {
prunedList := imageSliceWithoutValue(list, image)
if len(prunedList) == 0 {
delete(r.bydigest, image.Digest)
} else {
r.bydigest[image.Digest] = prunedList
}
}
}
if err := r.Save(); err != nil {
return err
}
@@ -502,7 +564,7 @@ func (r *imageStore) Exists(id string) bool {

func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) {
if images, ok := r.bydigest[d]; ok {
return images, nil
return copyImageSlice(images), nil
}
return nil, ErrImageUnknown
}
@@ -604,10 +666,19 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
|
||||
if !ok {
|
||||
return ErrImageUnknown
|
||||
}
|
||||
if err := os.MkdirAll(r.datadir(image.ID), 0700); err != nil {
|
||||
err := os.MkdirAll(r.datadir(image.ID), 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err := ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
|
||||
var newDigest digest.Digest
|
||||
if bigDataNameIsManifest(key) {
|
||||
if newDigest, err = manifest.Digest(data); err != nil {
|
||||
return errors.Wrapf(err, "error digesting manifest")
|
||||
}
|
||||
} else {
|
||||
newDigest = digest.Canonical.FromBytes(data)
|
||||
}
|
||||
err = ioutils.AtomicWriteFile(r.datapath(image.ID, key), data, 0600)
|
||||
if err == nil {
|
||||
save := false
|
||||
if image.BigDataSizes == nil {
|
||||
@@ -619,7 +690,6 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
|
||||
image.BigDataDigests = make(map[string]digest.Digest)
|
||||
}
|
||||
oldDigest, digestOk := image.BigDataDigests[key]
|
||||
newDigest := digest.Canonical.FromBytes(data)
|
||||
image.BigDataDigests[key] = newDigest
|
||||
if !sizeOk || oldSize != image.BigDataSizes[key] || !digestOk || oldDigest != newDigest {
|
||||
save = true
|
||||
@@ -635,20 +705,21 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error {
|
||||
image.BigDataNames = append(image.BigDataNames, key)
|
||||
save = true
|
||||
}
|
||||
if key == ImageDigestBigDataKey {
|
||||
if oldDigest != "" && oldDigest != newDigest && oldDigest != image.Digest {
|
||||
// remove the image from the list of images in the digest-based
|
||||
// index which corresponds to the old digest for this item, unless
|
||||
// it's also the hard-coded digest
|
||||
if list, ok := r.bydigest[oldDigest]; ok {
|
||||
prunedList := imageSliceWithoutValue(list, image)
|
||||
if len(prunedList) == 0 {
|
||||
delete(r.bydigest, oldDigest)
|
||||
} else {
|
||||
r.bydigest[oldDigest] = prunedList
|
||||
}
|
||||
for _, oldDigest := range image.Digests {
|
||||
// remove the image from the list of images in the digest-based index
|
||||
if list, ok := r.bydigest[oldDigest]; ok {
|
||||
prunedList := imageSliceWithoutValue(list, image)
|
||||
if len(prunedList) == 0 {
|
||||
delete(r.bydigest, oldDigest)
|
||||
} else {
|
||||
r.bydigest[oldDigest] = prunedList
|
||||
}
|
||||
}
|
||||
}
|
||||
if err = image.recomputeDigests(); err != nil {
|
||||
return errors.Wrapf(err, "error loading recomputing image digest information for %s", image.ID)
|
||||
}
|
||||
for _, newDigest := range image.Digests {
|
||||
// add the image to the list of images in the digest-based index which
|
||||
// corresponds to the new digest for this item, unless it's already there
|
||||
list := r.bydigest[newDigest]
|
||||
@@ -685,6 +756,10 @@ func (r *imageStore) Lock() {
|
||||
r.lockfile.Lock()
|
||||
}
|
||||
|
||||
func (r *imageStore) RLock() {
|
||||
r.lockfile.RLock()
|
||||
}
|
||||
|
||||
func (r *imageStore) Unlock() {
|
||||
r.lockfile.Unlock()
|
||||
}
|
||||
|
||||
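The recurring prune-or-replace pattern in the hunks above keeps a digest-to-images index consistent as entries come and go: append on add, filter on remove, and drop the map key once its list empties. As a standalone illustration (a minimal sketch, not the vendored code; Image and the helper names here are simplified stand-ins), the bookkeeping looks like this:

    package main

    import "fmt"

    // Image is a simplified stand-in for the store's image record.
    type Image struct {
        ID string
    }

    // addToIndex appends img to the list kept for digest, mirroring
    // `r.bydigest[digest] = append(list, image)` in the diff.
    func addToIndex(index map[string][]*Image, digest string, img *Image) {
        index[digest] = append(index[digest], img)
    }

    // removeFromIndex prunes img from the list for digest and deletes
    // the map entry entirely once the list is empty, so lookups for
    // stale digests fail cleanly instead of returning an empty slice.
    func removeFromIndex(index map[string][]*Image, digest string, img *Image) {
        pruned := index[digest][:0]
        for _, candidate := range index[digest] {
            if candidate != img {
                pruned = append(pruned, candidate)
            }
        }
        if len(pruned) == 0 {
            delete(index, digest)
        } else {
            index[digest] = pruned
        }
    }

    func main() {
        index := map[string][]*Image{}
        img := &Image{ID: "abc"}
        addToIndex(index, "sha256:1111", img)
        removeFromIndex(index, "sha256:1111", img)
        fmt.Println(len(index)) // 0: the empty list was deleted, not kept
    }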
246 vendor/github.com/containers/storage/layers.go generated vendored
@@ -229,6 +229,7 @@ type LayerStore interface {

type layerStore struct {
lockfile Locker
mountsLockfile Locker
rundir string
driver drivers.Driver
layerdir string
@@ -291,7 +292,6 @@ func (r *layerStore) Load() error {
idlist := []string{}
ids := make(map[string]*Layer)
names := make(map[string]*Layer)
mounts := make(map[string]*Layer)
compressedsums := make(map[digest.Digest][]string)
uncompressedsums := make(map[digest.Digest][]string)
if r.lockfile.IsReadWrite() {
@@ -319,39 +319,29 @@ func (r *layerStore) Load() error {
label.ReserveLabel(layer.MountLabel)
}
}
err = nil
}
if shouldSave && !r.IsReadWrite() {
if shouldSave && (!r.IsReadWrite() || !r.Locked()) {
return ErrDuplicateLayerNames
}
mpath := r.mountspath()
data, err = ioutil.ReadFile(mpath)
if err != nil && !os.IsNotExist(err) {
return err
}
layerMounts := []layerMountPoint{}
if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
for _, mount := range layerMounts {
if mount.MountPoint != "" {
if layer, ok := ids[mount.ID]; ok {
mounts[mount.MountPoint] = layer
layer.MountPoint = mount.MountPoint
layer.MountCount = mount.MountCount
}
}
}
}
r.layers = layers
r.idindex = truncindex.NewTruncIndex(idlist)
r.byid = ids
r.byname = names
r.bymount = mounts
r.bycompressedsum = compressedsums
r.byuncompressedsum = uncompressedsums
err = nil
// Load and merge information about which layers are mounted, and where.
if r.IsReadWrite() {
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
if err = r.loadMounts(); err != nil {
return err
}
}
// Last step: if we're writable, try to remove anything that a previous
// user of this storage area marked for deletion but didn't manage to
// actually delete.
if r.IsReadWrite() {
if r.IsReadWrite() && r.Locked() {
for _, layer := range r.layers {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
@@ -373,12 +363,36 @@ func (r *layerStore) Load() error {
return err
}

func (r *layerStore) loadMounts() error {
mounts := make(map[string]*Layer)
mpath := r.mountspath()
data, err := ioutil.ReadFile(mpath)
if err != nil && !os.IsNotExist(err) {
return err
}
layerMounts := []layerMountPoint{}
if err = json.Unmarshal(data, &layerMounts); len(data) == 0 || err == nil {
for _, mount := range layerMounts {
if mount.MountPoint != "" {
if layer, ok := r.lookup(mount.ID); ok {
mounts[mount.MountPoint] = layer
layer.MountPoint = mount.MountPoint
layer.MountCount = mount.MountCount
}
}
}
err = nil
}
r.bymount = mounts
return err
}

func (r *layerStore) Save() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
}
if !r.Locked() {
return errors.New("layer store is not locked")
return errors.New("layer store is not locked for writing")
}
rpath := r.layerspath()
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
@@ -388,6 +402,25 @@ func (r *layerStore) Save() error {
if err != nil {
return err
}
if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil {
return err
}
if !r.IsReadWrite() {
return nil
}
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
defer r.mountsLockfile.Touch()
return r.saveMounts()
}

func (r *layerStore) saveMounts() error {
if !r.IsReadWrite() {
return errors.Wrapf(ErrStoreIsReadOnly, "not allowed to modify the layer store at %q", r.layerspath())
}
if !r.mountsLockfile.Locked() {
return errors.New("layer store mount information is not locked for writing")
}
mpath := r.mountspath()
if err := os.MkdirAll(filepath.Dir(mpath), 0700); err != nil {
return err
@@ -406,11 +439,10 @@ func (r *layerStore) Save() error {
if err != nil {
return err
}
if err := ioutils.AtomicWriteFile(rpath, jldata, 0600); err != nil {
if err = ioutils.AtomicWriteFile(mpath, jmdata, 0600); err != nil {
return err
}
defer r.Touch()
return ioutils.AtomicWriteFile(mpath, jmdata, 0600)
return r.loadMounts()
}

func newLayerStore(rundir string, layerdir string, driver drivers.Driver, uidMap, gidMap []idtools.IDMap) (LayerStore, error) {
@@ -426,16 +458,21 @@ func newLayerStore(rundir string, layerdir string, driver drivers.Driver, uidMap
}
lockfile.Lock()
defer lockfile.Unlock()
mountsLockfile, err := GetLockfile(filepath.Join(rundir, "mountpoints.lock"))
if err != nil {
return nil, err
}
rlstore := layerStore{
lockfile: lockfile,
driver: driver,
rundir: rundir,
layerdir: layerdir,
byid: make(map[string]*Layer),
bymount: make(map[string]*Layer),
byname: make(map[string]*Layer),
uidMap: copyIDMap(uidMap),
gidMap: copyIDMap(gidMap),
lockfile: lockfile,
mountsLockfile: mountsLockfile,
driver: driver,
rundir: rundir,
layerdir: layerdir,
byid: make(map[string]*Layer),
bymount: make(map[string]*Layer),
byname: make(map[string]*Layer),
uidMap: copyIDMap(uidMap),
gidMap: copyIDMap(gidMap),
}
if err := rlstore.Load(); err != nil {
return nil, err
@@ -451,13 +488,14 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (ROL
lockfile.Lock()
defer lockfile.Unlock()
rlstore := layerStore{
lockfile: lockfile,
driver: driver,
rundir: rundir,
layerdir: layerdir,
byid: make(map[string]*Layer),
bymount: make(map[string]*Layer),
byname: make(map[string]*Layer),
lockfile: lockfile,
mountsLockfile: nil,
driver: driver,
rundir: rundir,
layerdir: layerdir,
byid: make(map[string]*Layer),
bymount: make(map[string]*Layer),
byname: make(map[string]*Layer),
}
if err := rlstore.Load(); err != nil {
return nil, err
@@ -551,9 +589,20 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
}
}
parent := ""
var parentMappings *idtools.IDMappings
if parentLayer != nil {
parent = parentLayer.ID
}
var parentMappings, templateIDMappings, oldMappings *idtools.IDMappings
if moreOptions.TemplateLayer != "" {
templateLayer, ok := r.lookup(moreOptions.TemplateLayer)
if !ok {
return nil, -1, ErrLayerUnknown
}
templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap)
} else {
templateIDMappings = &idtools.IDMappings{}
}
if parentLayer != nil {
parentMappings = idtools.NewIDMappingsFromMaps(parentLayer.UIDMap, parentLayer.GIDMap)
} else {
parentMappings = &idtools.IDMappings{}
@@ -566,23 +615,34 @@ func (r *layerStore) Put(id string, parentLayer *Layer, names []string, mountLab
MountLabel: mountLabel,
StorageOpt: options,
}
if writeable {
if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil {
if moreOptions.TemplateLayer != "" {
if err = r.driver.CreateFromTemplate(id, moreOptions.TemplateLayer, templateIDMappings, parent, parentMappings, &opts, writeable); err != nil {
if id != "" {
return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id)
return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q with ID %q", moreOptions.TemplateLayer, id)
}
return nil, -1, errors.Wrapf(err, "error creating read-write layer")
return nil, -1, errors.Wrapf(err, "error creating copy of template layer %q", moreOptions.TemplateLayer)
}
oldMappings = templateIDMappings
} else {
if err = r.driver.Create(id, parent, &opts); err != nil {
if id != "" {
return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id)
if writeable {
if err = r.driver.CreateReadWrite(id, parent, &opts); err != nil {
if id != "" {
return nil, -1, errors.Wrapf(err, "error creating read-write layer with ID %q", id)
}
return nil, -1, errors.Wrapf(err, "error creating read-write layer")
}
} else {
if err = r.driver.Create(id, parent, &opts); err != nil {
if id != "" {
return nil, -1, errors.Wrapf(err, "error creating layer with ID %q", id)
}
return nil, -1, errors.Wrapf(err, "error creating layer")
}
return nil, -1, errors.Wrapf(err, "error creating layer")
}
oldMappings = parentMappings
}
if !reflect.DeepEqual(parentMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(parentMappings.GIDs(), idMappings.GIDs()) {
if err = r.driver.UpdateLayerIDMap(id, parentMappings, idMappings, mountLabel); err != nil {
if !reflect.DeepEqual(oldMappings.UIDs(), idMappings.UIDs()) || !reflect.DeepEqual(oldMappings.GIDs(), idMappings.GIDs()) {
if err = r.driver.UpdateLayerIDMap(id, oldMappings, idMappings, mountLabel); err != nil {
// We don't have a record of this layer, but at least
// try to clean it up underneath us.
r.driver.Remove(id)
@@ -651,6 +711,16 @@ func (r *layerStore) Create(id string, parent *Layer, names []string, mountLabel
}

func (r *layerStore) Mounted(id string) (int, error) {
if !r.IsReadWrite() {
return 0, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath())
}
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
if err = r.loadMounts(); err != nil {
return 0, err
}
}
layer, ok := r.lookup(id)
if !ok {
return 0, ErrLayerUnknown
@@ -662,13 +732,21 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
if !r.IsReadWrite() {
return "", errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
}
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
if err = r.loadMounts(); err != nil {
return "", err
}
}
defer r.mountsLockfile.Touch()
layer, ok := r.lookup(id)
if !ok {
return "", ErrLayerUnknown
}
if layer.MountCount > 0 {
layer.MountCount++
return layer.MountPoint, r.Save()
return layer.MountPoint, r.saveMounts()
}
if options.MountLabel == "" {
options.MountLabel = layer.MountLabel
@@ -687,7 +765,7 @@ func (r *layerStore) Mount(id string, options drivers.MountOpts) (string, error)
layer.MountPoint = filepath.Clean(mountpoint)
layer.MountCount++
r.bymount[layer.MountPoint] = layer
err = r.Save()
err = r.saveMounts()
}
return mountpoint, err
}
@@ -696,6 +774,14 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
if !r.IsReadWrite() {
return false, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to update mount locations for layers at %q", r.mountspath())
}
r.mountsLockfile.Lock()
defer r.mountsLockfile.Unlock()
if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
if err = r.loadMounts(); err != nil {
return false, err
}
}
defer r.mountsLockfile.Touch()
layer, ok := r.lookup(id)
if !ok {
layerByMount, ok := r.bymount[filepath.Clean(id)]
@@ -709,7 +795,7 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
}
if layer.MountCount > 1 {
layer.MountCount--
return true, r.Save()
return true, r.saveMounts()
}
err := r.driver.Put(id)
if err == nil || os.IsNotExist(err) {
@@ -718,12 +804,22 @@ func (r *layerStore) Unmount(id string, force bool) (bool, error) {
}
layer.MountCount--
layer.MountPoint = ""
return false, r.Save()
return false, r.saveMounts()
}
return true, err
}

func (r *layerStore) ParentOwners(id string) (uids, gids []int, err error) {
if !r.IsReadWrite() {
return nil, nil, errors.Wrapf(ErrStoreIsReadOnly, "no mount information for layers at %q", r.mountspath())
}
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
if modified, err := r.mountsLockfile.Modified(); modified || err != nil {
if err = r.loadMounts(); err != nil {
return nil, nil, err
}
}
layer, ok := r.lookup(id)
if !ok {
return nil, nil, ErrLayerUnknown
@@ -840,14 +936,23 @@ func (r *layerStore) Delete(id string) error {
return ErrLayerUnknown
}
id = layer.ID
// This check is needed for idempotency of delete where the layer could have been
// already unmounted (since c/storage gives you that API directly)
for layer.MountCount > 0 {
// The layer may already have been explicitly unmounted, but if not, we
// should try to clean that up before we start deleting anything at the
// driver level.
mountCount, err := r.Mounted(id)
if err != nil {
return errors.Wrapf(err, "error checking if layer %q is still mounted", id)
}
for mountCount > 0 {
if _, err := r.Unmount(id, false); err != nil {
return err
}
mountCount, err = r.Mounted(id)
if err != nil {
return errors.Wrapf(err, "error checking if layer %q is still mounted", id)
}
}
err := r.driver.Remove(id)
err = r.driver.Remove(id)
if err == nil {
os.Remove(r.tspath(id))
delete(r.byid, id)
@@ -1200,6 +1305,10 @@ func (r *layerStore) Lock() {
r.lockfile.Lock()
}

func (r *layerStore) RLock() {
r.lockfile.RLock()
}

func (r *layerStore) Unlock() {
r.lockfile.Unlock()
}
@@ -1209,7 +1318,20 @@ func (r *layerStore) Touch() error {
}

func (r *layerStore) Modified() (bool, error) {
return r.lockfile.Modified()
var mmodified bool
lmodified, err := r.lockfile.Modified()
if err != nil {
return lmodified, err
}
if r.IsReadWrite() {
r.mountsLockfile.RLock()
defer r.mountsLockfile.Unlock()
mmodified, err = r.mountsLockfile.Modified()
if err != nil {
return lmodified, err
}
}
return lmodified || mmodified, nil
}

func (r *layerStore) IsReadWrite() bool {
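Several of the rewritten accessors above (Mounted, Mount, Unmount, ParentOwners) share one shape: take the separate mounts lock, re-read the mounts file only if another process touched it since the last read, then do the lookup. A minimal sketch of that shape, with hypothetical Locker and mountStore stand-ins rather than the vendored types:

    package main

    import "fmt"

    // Locker, mountStore, and loadMounts are stand-ins for the vendored
    // types; only the locking shape is the point here.
    type Locker interface {
        RLock()
        Unlock()
        Modified() (bool, error)
    }

    // noopLocker pretends the on-disk state was changed by someone else.
    type noopLocker struct{}

    func (noopLocker) RLock()                  {}
    func (noopLocker) Unlock()                 {}
    func (noopLocker) Modified() (bool, error) { return true, nil }

    type mountStore struct {
        mountsLockfile Locker
        mounts         map[string]int // mount point -> count (toy state)
    }

    // loadMounts would re-read the mounts file from disk; faked here.
    func (s *mountStore) loadMounts() error {
        s.mounts = map[string]int{"layer1": 2}
        return nil
    }

    // Mounted reports the mount count for id, reloading the on-disk
    // state first if another process wrote it since we last looked.
    func (s *mountStore) Mounted(id string) (int, error) {
        s.mountsLockfile.RLock()
        defer s.mountsLockfile.Unlock()
        if modified, err := s.mountsLockfile.Modified(); modified || err != nil {
            if err = s.loadMounts(); err != nil {
                return 0, err
            }
        }
        return s.mounts[id], nil
    }

    func main() {
        s := &mountStore{mountsLockfile: noopLocker{}}
        n, _ := s.Mounted("layer1")
        fmt.Println(n) // 2, read after the reload triggered by Modified()
    }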
59 vendor/github.com/containers/storage/lockfile.go generated vendored
@@ -1,7 +1,6 @@
package storage

import (
"fmt"
"path/filepath"
"sync"
"time"
@@ -13,7 +12,14 @@ import (
// identifier of the last party that made changes to whatever's being protected
// by the lock.
type Locker interface {
sync.Locker
// Acquire a writer lock.
Lock()

// Unlock the lock.
Unlock()

// Acquire a reader lock.
RLock()

// Touch records, for others sharing the lock, that the caller was the
// last writer. It should only be called with the lock held.
@@ -29,7 +35,7 @@ type Locker interface {
// IsReadWrite() checks if the lock file is read-write
IsReadWrite() bool

// Locked() checks if lock is locked
// Locked() checks if lock is locked for writing by a thread in this process
Locked() bool
}

@@ -39,44 +45,41 @@ var (
)

// GetLockfile opens a read-write lock file, creating it if necessary. The
// Locker object it returns will be returned unlocked.
// Locker object may already be locked if the path has already been requested
// by the current process.
func GetLockfile(path string) (Locker, error) {
lockfilesLock.Lock()
defer lockfilesLock.Unlock()
if lockfiles == nil {
lockfiles = make(map[string]Locker)
}
cleanPath := filepath.Clean(path)
if locker, ok := lockfiles[cleanPath]; ok {
if !locker.IsReadWrite() {
return nil, errors.Wrapf(ErrLockReadOnly, "lock %q is a read-only lock", cleanPath)
}
return locker, nil
}
locker, err := getLockFile(path, false) // platform dependent locker
if err != nil {
return nil, err
}
lockfiles[filepath.Clean(path)] = locker
return locker, nil
return getLockfile(path, false)
}

// GetROLockfile opens a read-only lock file. The Locker object it returns
// will be returned unlocked.
// GetROLockfile opens a read-only lock file, creating it if necessary. The
// Locker object may already be locked if the path has already been requested
// by the current process.
func GetROLockfile(path string) (Locker, error) {
return getLockfile(path, true)
}

// getLockfile is a helper for GetLockfile and GetROLockfile and returns Locker
// based on the path and read-only property.
func getLockfile(path string, ro bool) (Locker, error) {
lockfilesLock.Lock()
defer lockfilesLock.Unlock()
if lockfiles == nil {
lockfiles = make(map[string]Locker)
}
cleanPath := filepath.Clean(path)
cleanPath, err := filepath.Abs(path)
if err != nil {
return nil, errors.Wrapf(err, "error ensuring that path %q is an absolute path", path)
}
if locker, ok := lockfiles[cleanPath]; ok {
if locker.IsReadWrite() {
return nil, fmt.Errorf("lock %q is a read-write lock", cleanPath)
if ro && locker.IsReadWrite() {
return nil, errors.Errorf("lock %q is not a read-only lock", cleanPath)
}
if !ro && !locker.IsReadWrite() {
return nil, errors.Errorf("lock %q is not a read-write lock", cleanPath)
}
return locker, nil
}
locker, err := getLockFile(path, true) // platform dependent locker
locker, err := getLockFile(path, ro) // platform dependent locker
if err != nil {
return nil, err
}
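The getLockfile helper above now funnels both GetLockfile and GetROLockfile through one registry keyed by absolute path, so every caller in the process shares a single Locker per file, and mismatched read-only versus read-write requests fail loudly. A compressed sketch of that caching discipline (the lock construction itself is faked; the locker type here is an assumption for the example):

    package main

    import (
        "fmt"
        "path/filepath"
        "sync"
    )

    type locker struct{ ro bool }

    func (l *locker) IsReadWrite() bool { return !l.ro }

    var (
        registryMu sync.Mutex
        registry   = map[string]*locker{}
    )

    // getLocker returns one shared locker per absolute path, refusing to
    // hand out the same file under conflicting read-only expectations.
    func getLocker(path string, ro bool) (*locker, error) {
        registryMu.Lock()
        defer registryMu.Unlock()
        cleanPath, err := filepath.Abs(path)
        if err != nil {
            return nil, err
        }
        if l, ok := registry[cleanPath]; ok {
            if ro && l.IsReadWrite() {
                return nil, fmt.Errorf("lock %q is not a read-only lock", cleanPath)
            }
            if !ro && !l.IsReadWrite() {
                return nil, fmt.Errorf("lock %q is not a read-write lock", cleanPath)
            }
            return l, nil
        }
        l := &locker{ro: ro}
        registry[cleanPath] = l
        return l, nil
    }

    func main() {
        a, _ := getLocker("./x.lock", false)
        b, _ := getLocker("x.lock", false) // same file after filepath.Abs
        fmt.Println(a == b)                // true: one locker per path
    }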
134 vendor/github.com/containers/storage/lockfile_unix.go generated vendored
@@ -3,6 +3,7 @@
package storage

import (
"fmt"
"os"
"sync"
"time"
@@ -24,38 +25,85 @@ func getLockFile(path string, ro bool) (Locker, error) {
return nil, errors.Wrapf(err, "error opening %q", path)
}
unix.CloseOnExec(fd)

locktype := unix.F_WRLCK
if ro {
return &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_RDLCK, locked: false}, nil
locktype = unix.F_RDLCK
}
return &lockfile{file: path, fd: uintptr(fd), lw: stringid.GenerateRandomID(), locktype: unix.F_WRLCK, locked: false}, nil
return &lockfile{
stateMutex: &sync.Mutex{},
rwMutex: &sync.RWMutex{},
file: path,
fd: uintptr(fd),
lw: stringid.GenerateRandomID(),
locktype: int16(locktype),
locked: false,
ro: ro}, nil
}

type lockfile struct {
mu sync.Mutex
file string
fd uintptr
lw string
locktype int16
locked bool
// rwMutex serializes concurrent reader-writer acquisitions in the same process space
rwMutex *sync.RWMutex
// stateMutex is used to synchronize concurrent accesses to the state below
stateMutex *sync.Mutex
counter int64
file string
fd uintptr
lw string
locktype int16
locked bool
ro bool
}

// Lock locks the lock file
func (l *lockfile) Lock() {
// lock locks the lockfile via FCTNL(2) based on the specified type and
// command.
func (l *lockfile) lock(l_type int16) {
lk := unix.Flock_t{
Type: l.locktype,
Type: l_type,
Whence: int16(os.SEEK_SET),
Start: 0,
Len: 0,
Pid: int32(os.Getpid()),
}
l.mu.Lock()
switch l_type {
case unix.F_RDLCK:
l.rwMutex.RLock()
case unix.F_WRLCK:
l.rwMutex.Lock()
default:
panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", l_type))
}
l.stateMutex.Lock()
if l.counter == 0 {
// Optimization: only use the (expensive) fcntl syscall when
// the counter is 0. In this case, we're either the first
// reader lock or a writer lock.
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
}
l.locktype = l_type
l.locked = true
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
l.counter++
l.stateMutex.Unlock()
}

// Lock locks the lockfile as a writer. Note that RLock() will be called if
// the lock is a read-only one.
func (l *lockfile) Lock() {
if l.ro {
l.RLock()
} else {
l.lock(unix.F_WRLCK)
}
}

// Unlock unlocks the lock file
// LockRead locks the lockfile as a reader.
func (l *lockfile) RLock() {
l.lock(unix.F_RDLCK)
}

// Unlock unlocks the lockfile.
func (l *lockfile) Unlock() {
lk := unix.Flock_t{
Type: unix.F_UNLCK,
@@ -64,20 +112,50 @@ func (l *lockfile) Unlock() {
Len: 0,
Pid: int32(os.Getpid()),
}
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
l.stateMutex.Lock()
if l.locked == false {
// Panic when unlocking an unlocked lock. That's a violation
// of the lock semantics and will reveal such.
panic("calling Unlock on unlocked lock")
}
l.locked = false
l.mu.Unlock()
l.counter--
if l.counter < 0 {
// Panic when the counter is negative. There is no way we can
// recover from a corrupted lock and we need to protect the
// storage from corruption.
panic(fmt.Sprintf("lock %q has been unlocked too often", l.file))
}
if l.counter == 0 {
// We should only release the lock when the counter is 0 to
// avoid releasing read-locks too early; a given process may
// acquire a read lock multiple times.
l.locked = false
for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil {
time.Sleep(10 * time.Millisecond)
}
}
if l.locktype == unix.F_RDLCK {
l.rwMutex.RUnlock()
} else {
l.rwMutex.Unlock()
}
l.stateMutex.Unlock()
}

// Check if lock is locked
// Locked checks if lockfile is locked for writing by a thread in this process.
func (l *lockfile) Locked() bool {
return l.locked
l.stateMutex.Lock()
defer l.stateMutex.Unlock()
return l.locked && (l.locktype == unix.F_WRLCK)
}

// Touch updates the lock file with the UID of the user
// Touch updates the lock file with the UID of the user.
func (l *lockfile) Touch() error {
l.stateMutex.Lock()
if !l.locked || (l.locktype != unix.F_WRLCK) {
panic("attempted to update last-writer in lockfile without the write lock")
}
l.stateMutex.Unlock()
l.lw = stringid.GenerateRandomID()
id := []byte(l.lw)
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
@@ -98,9 +176,15 @@ func (l *lockfile) Touch() error {
return nil
}

// Modified indicates if the lock file has been updated since the last time it was loaded
// Modified indicates if the lockfile has been updated since the last time it
// was loaded.
func (l *lockfile) Modified() (bool, error) {
id := []byte(l.lw)
l.stateMutex.Lock()
if !l.locked {
panic("attempted to check last-writer in lockfile without locking it first")
}
l.stateMutex.Unlock()
_, err := unix.Seek(int(l.fd), 0, os.SEEK_SET)
if err != nil {
return true, err
@@ -117,7 +201,7 @@ func (l *lockfile) Modified() (bool, error) {
return l.lw != lw, nil
}

// IsRWLock indicates if the lock file is a read-write lock
// IsReadWriteLock indicates if the lock file is a read-write lock.
func (l *lockfile) IsReadWrite() bool {
return (l.locktype == unix.F_WRLCK)
return !l.ro
}
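The heart of the lockfile_unix.go rewrite above is that the expensive fcntl file lock is taken once per process and then reference-counted, while a sync.RWMutex arbitrates goroutines inside the process. A reduced model of just that counting logic, with the syscall replaced by comments so the sketch compiles anywhere:

    package main

    import (
        "fmt"
        "sync"
    )

    type reflock struct {
        rwMutex    sync.RWMutex // serializes readers/writers in-process
        stateMutex sync.Mutex   // guards counter
        counter    int64
    }

    func (l *reflock) lock(write bool) {
        if write {
            l.rwMutex.Lock()
        } else {
            l.rwMutex.RLock()
        }
        l.stateMutex.Lock()
        if l.counter == 0 {
            // Only the first holder pays for the file-level lock; in
            // the vendored code this is the fcntl(F_SETLKW) loop.
        }
        l.counter++
        l.stateMutex.Unlock()
    }

    func (l *reflock) unlock(write bool) {
        l.stateMutex.Lock()
        l.counter--
        if l.counter < 0 {
            panic("unlocked too often") // corrupted state, as in the diff
        }
        if l.counter == 0 {
            // Last holder out releases the file-level lock (F_UNLCK).
        }
        if write {
            l.rwMutex.Unlock()
        } else {
            l.rwMutex.RUnlock()
        }
        l.stateMutex.Unlock()
    }

    func main() {
        var l reflock
        l.lock(false)
        l.lock(false) // repeated read in one process: no extra syscall
        l.unlock(false)
        l.unlock(false)
        fmt.Println("counter:", l.counter) // 0
    }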
5 vendor/github.com/containers/storage/lockfile_windows.go generated vendored
@@ -23,6 +23,11 @@ func (l *lockfile) Lock() {
l.locked = true
}

func (l *lockfile) RLock() {
l.mu.Lock()
l.locked = true
}

func (l *lockfile) Unlock() {
l.locked = false
l.mu.Unlock()
107 vendor/github.com/containers/storage/pkg/archive/archive.go generated vendored
@@ -13,6 +13,7 @@ import (
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"

"github.com/containers/storage/pkg/fileutils"
@@ -23,6 +24,7 @@ import (
"github.com/containers/storage/pkg/system"
gzip "github.com/klauspost/pgzip"
rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

@@ -1331,3 +1333,108 @@ const (
// HeaderSize is the size in bytes of a tar header
HeaderSize = 512
)

// NewArchiver returns a new Archiver
func NewArchiver(idMappings *idtools.IDMappings) *Archiver {
if idMappings == nil {
idMappings = &idtools.IDMappings{}
}
return &Archiver{Untar: Untar, TarIDMappings: idMappings, UntarIDMappings: idMappings}
}

// NewArchiverWithChown returns a new Archiver which uses Untar and the provided ID mapping configuration on both ends
func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *Archiver {
if tarIDMappings == nil {
tarIDMappings = &idtools.IDMappings{}
}
if untarIDMappings == nil {
untarIDMappings = &idtools.IDMappings{}
}
return &Archiver{Untar: Untar, TarIDMappings: tarIDMappings, ChownOpts: chownOpts, UntarIDMappings: untarIDMappings}
}

// CopyFileWithTarAndChown returns a function which copies a single file from outside
// of any container into our working container, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts
func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
if hasher != nil {
originalUntar := archiver.Untar
archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error {
contentReader, contentWriter, err := os.Pipe()
if err != nil {
return errors.Wrapf(err, "error creating pipe extract data to %q", dest)
}
defer contentReader.Close()
defer contentWriter.Close()
var hashError error
var hashWorker sync.WaitGroup
hashWorker.Add(1)
go func() {
t := tar.NewReader(contentReader)
_, err := t.Next()
if err != nil {
hashError = err
}
if _, err = io.Copy(hasher, t); err != nil && err != io.EOF {
hashError = err
}
hashWorker.Done()
}()
if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
err = errors.Wrapf(err, "error extracting data to %q while copying", dest)
}
hashWorker.Wait()
if err == nil {
err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest)
}
return err
}
}
return archiver.CopyFileWithTar
}

// CopyWithTarAndChown returns a function which copies a directory tree from outside of
// any container into our working container, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts
func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
if hasher != nil {
originalUntar := archiver.Untar
archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error {
return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
}
}
return archiver.CopyWithTar
}

// UntarPathAndChown returns a function which extracts an archive in a specified
// location into our working container, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts
func UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
if hasher != nil {
originalUntar := archiver.Untar
archiver.Untar = func(tarArchive io.Reader, dest string, options *TarOptions) error {
return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
}
}
return archiver.UntarPath
}

// TarPath returns a function which creates an archive of a specified
// location in the container's filesystem, mapping permissions using the
// container's ID maps
func TarPath(uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(path string) (io.ReadCloser, error) {
tarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
return func(path string) (io.ReadCloser, error) {
return TarWithOptions(path, &TarOptions{
Compression: Uncompressed,
UIDMaps: tarMappings.UIDs(),
GIDMaps: tarMappings.GIDs(),
})
}
}
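CopyFileWithTarAndChown above wraps the untar step so the tar stream is extracted and hashed at the same time: io.TeeReader mirrors every byte into a pipe, and a goroutine re-parses that copy as a tar stream and feeds the first entry's body to the hasher. The same tee-and-hash idea in isolation, assuming nothing beyond the standard library (all names are local to this sketch):

    package main

    import (
        "archive/tar"
        "bytes"
        "crypto/sha256"
        "fmt"
        "io"
        "io/ioutil"
        "sync"
    )

    func main() {
        // Build a one-entry tar archive in memory.
        var archiveBuf bytes.Buffer
        tw := tar.NewWriter(&archiveBuf)
        body := []byte("hello")
        tw.WriteHeader(&tar.Header{Name: "f", Mode: 0600, Size: int64(len(body))})
        tw.Write(body)
        tw.Close()

        hasher := sha256.New()
        pr, pw := io.Pipe()
        var wg sync.WaitGroup
        wg.Add(1)
        go func() {
            defer wg.Done()
            // Re-parse the teed copy and hash the first entry's body,
            // as the goroutine in CopyFileWithTarAndChown does.
            t := tar.NewReader(pr)
            if _, err := t.Next(); err == nil {
                io.Copy(hasher, t)
            }
            io.Copy(ioutil.Discard, pr) // drain trailing tar padding
        }()

        // "Extraction" side: consume the stream while teeing it to the pipe.
        io.Copy(ioutil.Discard, io.TeeReader(&archiveBuf, pw))
        pw.Close()
        wg.Wait()
        fmt.Printf("%x\n", hasher.Sum(nil)) // sha256 of "hello"
    }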
2 vendor/github.com/containers/storage/pkg/archive/archive_ffjson.go generated vendored
@@ -1,5 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
// source: ./pkg/archive/archive.go
// source: pkg/archive/archive.go

package archive
92 vendor/github.com/containers/storage/pkg/chrootarchive/archive.go generated vendored
@@ -1,34 +1,32 @@
package chrootarchive

import (
"archive/tar"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"

"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/pkg/errors"
)

// NewArchiver returns a new Archiver which uses chrootarchive.Untar
func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
if idMappings == nil {
idMappings = &idtools.IDMappings{}
}
return &archive.Archiver{Untar: Untar, TarIDMappings: idMappings, UntarIDMappings: idMappings}
archiver := archive.NewArchiver(idMappings)
archiver.Untar = Untar
return archiver
}

// NewArchiverWithChown returns a new Archiver which uses chrootarchive.Untar and the provided ID mapping configuration on both ends
func NewArchiverWithChown(tarIDMappings *idtools.IDMappings, chownOpts *idtools.IDPair, untarIDMappings *idtools.IDMappings) *archive.Archiver {
if tarIDMappings == nil {
tarIDMappings = &idtools.IDMappings{}
}
if untarIDMappings == nil {
untarIDMappings = &idtools.IDMappings{}
}
return &archive.Archiver{Untar: Untar, TarIDMappings: tarIDMappings, ChownOpts: chownOpts, UntarIDMappings: untarIDMappings}
archiver := archive.NewArchiverWithChown(tarIDMappings, chownOpts, untarIDMappings)
archiver.Untar = Untar
return archiver
}

// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
@@ -81,3 +79,75 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions

return invokeUnpack(r, dest, options)
}

// CopyFileWithTarAndChown returns a function which copies a single file from outside
// of any container into our working container, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts
func CopyFileWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
if hasher != nil {
originalUntar := archiver.Untar
archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
contentReader, contentWriter, err := os.Pipe()
if err != nil {
return errors.Wrapf(err, "error creating pipe extract data to %q", dest)
}
defer contentReader.Close()
defer contentWriter.Close()
var hashError error
var hashWorker sync.WaitGroup
hashWorker.Add(1)
go func() {
t := tar.NewReader(contentReader)
_, err := t.Next()
if err != nil {
hashError = err
}
if _, err = io.Copy(hasher, t); err != nil && err != io.EOF {
hashError = err
}
hashWorker.Done()
}()
if err = originalUntar(io.TeeReader(tarArchive, contentWriter), dest, options); err != nil {
err = errors.Wrapf(err, "error extracting data to %q while copying", dest)
}
hashWorker.Wait()
if err == nil {
err = errors.Wrapf(hashError, "error calculating digest of data for %q while copying", dest)
}
return err
}
}
return archiver.CopyFileWithTar
}

// CopyWithTarAndChown returns a function which copies a directory tree from outside of
// any container into our working container, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts
func CopyWithTarAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
if hasher != nil {
originalUntar := archiver.Untar
archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
}
}
return archiver.CopyWithTar
}

// UntarPathAndChown returns a function which extracts an archive in a specified
// location into our working container, mapping permissions using the
// container's ID maps, possibly overridden using the passed-in chownOpts
func UntarPathAndChown(chownOpts *idtools.IDPair, hasher io.Writer, uidmap []idtools.IDMap, gidmap []idtools.IDMap) func(src, dest string) error {
untarMappings := idtools.NewIDMappingsFromMaps(uidmap, gidmap)
archiver := NewArchiverWithChown(nil, chownOpts, untarMappings)
if hasher != nil {
originalUntar := archiver.Untar
archiver.Untar = func(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
return originalUntar(io.TeeReader(tarArchive, hasher), dest, options)
}
}
return archiver.UntarPath
}
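Note the simplification in this file's NewArchiver and NewArchiverWithChown: instead of duplicating the struct literal from pkg/archive, they now build the base Archiver and overwrite only its Untar member with the chroot-aware one. That decorate-one-field pattern in miniature, with toy types rather than the vendored API:

    package main

    import "fmt"

    // Archiver is a toy version of archive.Archiver: behavior is
    // carried in a replaceable function field.
    type Archiver struct {
        Untar func(src string) error
    }

    func NewArchiver() *Archiver {
        return &Archiver{Untar: func(src string) error {
            fmt.Println("plain untar of", src)
            return nil
        }}
    }

    // newChrootArchiver reuses the base constructor and swaps in a
    // different Untar, as chrootarchive.NewArchiver now does.
    func newChrootArchiver() *Archiver {
        a := NewArchiver()
        a.Untar = func(src string) error {
            fmt.Println("chrooted untar of", src)
            return nil
        }
        return a
    }

    func main() {
        newChrootArchiver().Untar("layer.tar")
    }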
96 vendor/github.com/containers/storage/pkg/config/config.go generated vendored Normal file
@@ -0,0 +1,96 @@
package config

// ThinpoolOptionsConfig represents the "storage.options.thinpool"
// TOML config table.
type ThinpoolOptionsConfig struct {
// AutoExtendPercent determines the amount by which pool needs to be
// grown. This is specified in terms of % of pool size. So a value of
// 20 means that when threshold is hit, pool will be grown by 20% of
// existing pool size.
AutoExtendPercent string `toml:"autoextend_percent"`

// AutoExtendThreshold determines the pool extension threshold in terms
// of percentage of pool size. For example, if threshold is 60, that
// means when pool is 60% full, threshold has been hit.
AutoExtendThreshold string `toml:"autoextend_threshold"`

// BaseSize specifies the size to use when creating the base device,
// which limits the size of images and containers.
BaseSize string `toml:"basesize"`

// BlockSize specifies a custom blocksize to use for the thin pool.
BlockSize string `toml:"blocksize"`

// DirectLvmDevice specifies a custom block storage device to use for
// the thin pool.
DirectLvmDevice string `toml:"directlvm_device"`

// DirectLvmDeviceForcewipes device even if device already has a
// filesystem
DirectLvmDeviceForce string `toml:"directlvm_device_force"`

// Fs specifies the filesystem type to use for the base device.
Fs string `toml:"fs"`

// log_level sets the log level of devicemapper.
LogLevel string `toml:"log_level"`

// MinFreeSpace specifies the min free space percent in a thin pool
// require for new device creation to
MinFreeSpace string `toml:"min_free_space"`

// MkfsArg specifies extra mkfs arguments to be used when creating the
// basedevice.
MkfsArg string `toml:"mkfsarg"`

// MountOpt specifies extra mount options used when mounting the thin
// devices.
MountOpt string `toml:"mountopt"`

// UseDeferredDeletion marks device for deferred deletion
UseDeferredDeletion string `toml:"use_deferred_deletion"`

// UseDeferredRemoval marks device for deferred removal
UseDeferredRemoval string `toml:"use_deferred_removal"`

// XfsNoSpaceMaxRetriesFreeSpace specifies the maximum number of
// retries XFS should attempt to complete IO when ENOSPC (no space)
// error is returned by underlying storage device.
XfsNoSpaceMaxRetries string `toml:"xfs_nospace_max_retries"`
}

// OptionsConfig represents the "storage.options" TOML config table.
type OptionsConfig struct {
// AdditionalImagesStores is the location of additional read/only
// Image stores. Usually used to access Networked File System
// for shared image content
AdditionalImageStores []string `toml:"additionalimagestores"`

// Size
Size string `toml:"size"`

// RemapUIDs is a list of default UID mappings to use for layers.
RemapUIDs string `toml:"remap-uids"`
// RemapGIDs is a list of default GID mappings to use for layers.
RemapGIDs string `toml:"remap-gids"`

// RemapUser is the name of one or more entries in /etc/subuid which
// should be used to set up default UID mappings.
RemapUser string `toml:"remap-user"`
// RemapGroup is the name of one or more entries in /etc/subgid which
// should be used to set up default GID mappings.
RemapGroup string `toml:"remap-group"`
// Thinpool container options to be handed to thinpool drivers
Thinpool struct{ ThinpoolOptionsConfig } `toml:"thinpool"`
// OSTree repository
OstreeRepo string `toml:"ostree_repo"`

// Do not create a bind mount on the storage home
SkipMountHome string `toml:"skip_mount_home"`

// Alternative program to use for the mount of the file system
MountProgram string `toml:"mount_program"`

// MountOpt specifies extra mount options used when mounting
MountOpt string `toml:"mountopt"`
}
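The struct tags in this new file map directly onto a storage.conf TOML table. If you were to decode such a file yourself, a sketch using github.com/BurntSushi/toml (already pinned in this tree's vendor.conf) might look like the following; the wrapping tomlConfig type and the sample text are assumptions for the example, not part of the vendored package:

    package main

    import (
        "fmt"

        "github.com/BurntSushi/toml"
        "github.com/containers/storage/pkg/config"
    )

    // tomlConfig is a hypothetical wrapper matching the
    // [storage.options] nesting of a storage.conf file.
    type tomlConfig struct {
        Storage struct {
            Options config.OptionsConfig `toml:"options"`
        } `toml:"storage"`
    }

    const sample = `
    [storage.options]
    additionalimagestores = ["/var/lib/shared"]
    mount_program = "/usr/bin/fuse-overlayfs"
    `

    func main() {
        var cfg tomlConfig
        if _, err := toml.Decode(sample, &cfg); err != nil {
            fmt.Println("decode error:", err)
            return
        }
        fmt.Println(cfg.Storage.Options.AdditionalImageStores)
        fmt.Println(cfg.Storage.Options.MountProgram)
    }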
699 vendor/github.com/containers/storage/store.go generated vendored
File diff suppressed because it is too large
11 vendor/github.com/containers/storage/vendor.conf generated vendored
@@ -1,14 +1,20 @@
github.com/BurntSushi/toml master
github.com/Microsoft/go-winio 307e919c663683a9000576fdc855acaf9534c165
github.com/Microsoft/hcsshim a8d9cc56cbce765a7eebdf4792e6ceceeff3edb8
github.com/containers/image master
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/docker 86f080cff0914e9694068ed78d503701667c4c00
github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
github.com/docker/libtrust master
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0
github.com/klauspost/pgzip v1.2.1
github.com/mattn/go-shellwords 753a2322a99f87c0eff284980e77f53041555bc6
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/opencontainers/go-digest master
github.com/opencontainers/image-spec master
github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07
github.com/opencontainers/selinux 36a9bc45a08c85f2c52bd9eb32e20267876773bd
github.com/opencontainers/selinux v1.1
github.com/ostreedev/ostree-go master
github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
github.com/pkg/errors master
@@ -23,6 +29,3 @@ golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
gotest.tools master
github.com/google/go-cmp master
github.com/klauspost/pgzip v1.2.1
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0
10 vendor/github.com/docker/distribution/blobs.go generated vendored
@@ -10,7 +10,6 @@ import (

"github.com/docker/distribution/reference"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go/v1"
)

var (
@@ -67,19 +66,12 @@ type Descriptor struct {
Size int64 `json:"size,omitempty"`

// Digest uniquely identifies the content. A byte stream can be verified
// against this digest.
// against against this digest.
Digest digest.Digest `json:"digest,omitempty"`

// URLs contains the source URLs of this content.
URLs []string `json:"urls,omitempty"`

// Annotations contains arbitrary metadata relating to the targeted content.
Annotations map[string]string `json:"annotations,omitempty"`

// Platform describes the platform which the image in the manifest runs on.
// This should only be used when referring to a manifest.
Platform *v1.Platform `json:"platform,omitempty"`

// NOTE: Before adding a field here, please ensure that all
// other options have been exhausted. Much of the type relationships
// depend on the simplicity of this type.
4 vendor/github.com/docker/distribution/errors.go generated vendored
@@ -20,10 +20,6 @@ var ErrManifestNotModified = errors.New("manifest not modified")
// performed
var ErrUnsupported = errors.New("operation unsupported")

// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1
// manifest but the registry is configured to reject it
var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported")

// ErrTagUnknown is returned if the given tag is not known by the tag service
type ErrTagUnknown struct {
Tag string
13 vendor/github.com/docker/distribution/metrics/prometheus.go generated vendored
@@ -1,13 +0,0 @@
package metrics

import "github.com/docker/go-metrics"

const (
// NamespacePrefix is the namespace of prometheus metrics
NamespacePrefix = "registry"
)

var (
// StorageNamespace is the prometheus namespace of blob/cache related operations
StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil)
)
20 vendor/github.com/docker/distribution/registry.go generated vendored
@@ -54,11 +54,6 @@ type RepositoryEnumerator interface {
Enumerate(ctx context.Context, ingester func(string) error) error
}

// RepositoryRemover removes given repository
type RepositoryRemover interface {
Remove(ctx context.Context, name reference.Named) error
}

// ManifestServiceOption is a function argument for Manifest Service methods
type ManifestServiceOption interface {
Apply(ManifestService) error
@@ -78,21 +73,6 @@ func (o WithTagOption) Apply(m ManifestService) error {
return nil
}

// WithManifestMediaTypes lists the media types the client wishes
// the server to provide.
func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption {
return WithManifestMediaTypesOption{mediaTypes}
}

// WithManifestMediaTypesOption holds a list of accepted media types
type WithManifestMediaTypesOption struct{ MediaTypes []string }

// Apply conforms to the ManifestServiceOption interface
func (o WithManifestMediaTypesOption) Apply(m ManifestService) error {
// no implementation
return nil
}

// Repository is a named collection of manifests and layers.
type Repository interface {
// Named returns the name of the repository.
6 vendor/github.com/docker/distribution/registry/api/errcode/handler.go generated vendored
@@ -36,5 +36,9 @@ func ServeJSON(w http.ResponseWriter, err error) error {

w.WriteHeader(sc)

return json.NewEncoder(w).Encode(err)
if err := json.NewEncoder(w).Encode(err); err != nil {
return err
}

return nil
}
9 vendor/github.com/docker/distribution/registry/api/v2/routes.go generated vendored
@@ -14,6 +14,15 @@ const (
RouteNameCatalog = "catalog"
)

var allEndpoints = []string{
RouteNameManifest,
RouteNameCatalog,
RouteNameTags,
RouteNameBlob,
RouteNameBlobUpload,
RouteNameBlobUploadChunk,
}

// Router builds a gorilla router with named routes for the various API
// methods. This can be used directly by both server implementations and
// clients.

@@ -45,13 +45,13 @@ type Manager interface {
// to a backend.
func NewSimpleManager() Manager {
return &simpleManager{
Challenges: make(map[string][]Challenge),
Challanges: make(map[string][]Challenge),
}
}

type simpleManager struct {
sync.RWMutex
Challenges map[string][]Challenge
Challanges map[string][]Challenge
}

func normalizeURL(endpoint *url.URL) {
@@ -64,7 +64,7 @@ func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {

m.RLock()
defer m.RUnlock()
challenges := m.Challenges[endpoint.String()]
challenges := m.Challanges[endpoint.String()]
return challenges, nil
}

@@ -82,7 +82,7 @@ func (m *simpleManager) AddResponse(resp *http.Response) error {

m.Lock()
defer m.Unlock()
m.Challenges[urlCopy.String()] = challenges
m.Challanges[urlCopy.String()] = challenges
return nil
}

78 vendor/github.com/docker/distribution/registry/client/repository.go generated vendored
@@ -62,7 +62,7 @@ func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
|
||||
}
|
||||
|
||||
// NewRegistry creates a registry namespace which can be used to get a listing of repositories
|
||||
func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) {
|
||||
func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) {
|
||||
ub, err := v2.NewURLBuilderFromString(baseURL, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -75,14 +75,16 @@ func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error)
|
||||
}
|
||||
|
||||
return ®istry{
|
||||
client: client,
|
||||
ub: ub,
|
||||
client: client,
|
||||
ub: ub,
|
||||
context: ctx,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type registry struct {
|
||||
client *http.Client
|
||||
ub *v2.URLBuilder
|
||||
client *http.Client
|
||||
ub *v2.URLBuilder
|
||||
context context.Context
|
||||
}
|
||||
|
||||
// Repositories returns a lexigraphically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
|
||||
@@ -131,7 +133,7 @@ func (r *registry) Repositories(ctx context.Context, entries []string, last stri
 }
 
 // NewRepository creates a new Repository for the given repository name and base URL.
-func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
+func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) {
 	ub, err := v2.NewURLBuilderFromString(baseURL, false)
 	if err != nil {
 		return nil, err
@@ -144,16 +146,18 @@ func NewRepository(name reference.Named, baseURL string, transport http.RoundTri
 	}
 
 	return &repository{
-		client: client,
-		ub:     ub,
-		name:   name,
+		client:  client,
+		ub:      ub,
+		name:    name,
+		context: ctx,
 	}, nil
 }
 
 type repository struct {
-	client *http.Client
-	ub     *v2.URLBuilder
-	name   reference.Named
+	client  *http.Client
+	ub      *v2.URLBuilder
+	context context.Context
+	name    reference.Named
 }
 
 func (r *repository) Named() reference.Named {
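After this change both constructors take a context.Context up front and stash it in the client struct. A sketch of how a caller drives that API shape (import paths match the vendored tree; authentication setup is omitted, so running this against a real registry would just fail with 401):

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
)

func main() {
	ctx := context.Background()

	named, err := reference.WithName("library/busybox")
	if err != nil {
		panic(err)
	}

	// The context-bearing signature shown in the hunk above.
	repo, err := client.NewRepository(ctx, named, "https://registry-1.docker.io", http.DefaultTransport)
	if err != nil {
		panic(err)
	}

	// Tags(ctx) returns the TagService whose All method appears below.
	tags, err := repo.Tags(ctx).All(ctx)
	if err != nil {
		fmt.Println("listing failed (expected without auth):", err)
		return
	}
	fmt.Println(tags)
}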
@@ -186,35 +190,32 @@ func (r *repository) Manifests(ctx context.Context, options ...distribution.Mani
 
 func (r *repository) Tags(ctx context.Context) distribution.TagService {
 	return &tags{
-		client: r.client,
-		ub:     r.ub,
-		name:   r.Named(),
+		client:  r.client,
+		ub:      r.ub,
+		context: r.context,
+		name:    r.Named(),
 	}
 }
 
 // tags implements remote tagging operations.
 type tags struct {
-	client *http.Client
-	ub     *v2.URLBuilder
-	name   reference.Named
+	client  *http.Client
+	ub      *v2.URLBuilder
+	context context.Context
+	name    reference.Named
 }
 
 // All returns all tags
 func (t *tags) All(ctx context.Context) ([]string, error) {
 	var tags []string
 
-	listURLStr, err := t.ub.BuildTagsURL(t.name)
-	if err != nil {
-		return tags, err
-	}
-
-	listURL, err := url.Parse(listURLStr)
+	u, err := t.ub.BuildTagsURL(t.name)
 	if err != nil {
 		return tags, err
 	}
 
 	for {
-		resp, err := t.client.Get(listURL.String())
+		resp, err := t.client.Get(u)
 		if err != nil {
 			return tags, err
 		}
@@ -234,13 +235,7 @@ func (t *tags) All(ctx context.Context) ([]string, error) {
 		}
 		tags = append(tags, tagsResponse.Tags...)
 		if link := resp.Header.Get("Link"); link != "" {
-			linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>")
-			linkURL, err := url.Parse(linkURLStr)
-			if err != nil {
-				return tags, err
-			}
-
-			listURL = listURL.ResolveReference(linkURL)
+			u = strings.Trim(strings.Split(link, ";")[0], "<>")
 		} else {
 			return tags, nil
 		}
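Note the behavioral nuance in this hunk: the removed lines parsed the Link header and resolved it against the current URL, which also handled relative next-page links, while the kept one-liner takes the string between the angle brackets verbatim. The header format in play (RFC 5988), in a runnable illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A registry advertises the next page of results like this:
	link := `</v2/library/busybox/tags/list?last=musl&n=100>; rel="next"`

	// Plain string surgery, as in the kept code; a relative URL is used as-is.
	next := strings.Trim(strings.Split(link, ";")[0], "<>")
	fmt.Println(next) // /v2/library/busybox/tags/list?last=musl&n=100
}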
@@ -326,8 +321,7 @@ func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, er
 	defer resp.Body.Close()
 
 	switch {
-	case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0:
-		// if the response is a success AND a Docker-Content-Digest can be retrieved from the headers
+	case resp.StatusCode >= 200 && resp.StatusCode < 400:
 		return descriptorFromResponse(resp)
 	default:
 		// if the response is an error - there will be no body to decode.
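The dropped case means a successful response no longer has to carry a Docker-Content-Digest header before descriptorFromResponse is consulted; any 2xx/3xx status now goes through it. For reference, reading that header looks like this (the response literal and digest value are placeholders, not real data):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Illustrative response shape; a real one comes from the registry.
	resp := &http.Response{
		StatusCode: http.StatusOK,
		Header: http.Header{
			"Docker-Content-Digest": []string{"sha256:placeholder-not-a-real-digest"},
		},
	}

	if d := resp.Header.Get("Docker-Content-Digest"); d != "" {
		fmt.Println("digest from header:", d)
	} else {
		fmt.Println("no digest header; the caller must digest the body itself")
	}
}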
@@ -427,22 +421,18 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
 		ref         reference.Named
 		err         error
 		contentDgst *digest.Digest
-		mediaTypes  []string
 	)
 
 	for _, option := range options {
-		switch opt := option.(type) {
-		case distribution.WithTagOption:
+		if opt, ok := option.(distribution.WithTagOption); ok {
 			digestOrTag = opt.Tag
 			ref, err = reference.WithTag(ms.name, opt.Tag)
 			if err != nil {
 				return nil, err
 			}
-		case contentDigestOption:
+		} else if opt, ok := option.(contentDigestOption); ok {
 			contentDgst = opt.digest
-		case distribution.WithManifestMediaTypesOption:
-			mediaTypes = opt.MediaTypes
-		default:
+		} else {
 			err := option.Apply(ms)
 			if err != nil {
 				return nil, err
@@ -458,10 +448,6 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
 		}
 	}
 
-	if len(mediaTypes) == 0 {
-		mediaTypes = distribution.ManifestMediaTypes()
-	}
-
 	u, err := ms.ub.BuildManifestURL(ref)
 	if err != nil {
 		return nil, err
@@ -472,7 +458,7 @@ func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...dis
 		return nil, err
 	}
 
-	for _, t := range mediaTypes {
+	for _, t := range distribution.ManifestMediaTypes() {
 		req.Header.Add("Accept", t)
 	}
 
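With the mediaTypes plumbing gone, every manifest Get advertises the full set of registered manifest media types rather than a caller-restricted list. Roughly what that puts on the wire (the two media type strings are the well-known Docker schema2 ones, listed here only for illustration):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Stand-in for distribution.ManifestMediaTypes(), which returns every
	// manifest media type registered at init time.
	mediaTypes := []string{
		"application/vnd.docker.distribution.manifest.v2+json",
		"application/vnd.docker.distribution.manifest.list.v2+json",
	}

	req, err := http.NewRequest("GET", "https://registry.example/v2/foo/manifests/latest", nil)
	if err != nil {
		panic(err)
	}
	for _, t := range mediaTypes {
		req.Header.Add("Accept", t)
	}
	fmt.Println(req.Header["Accept"])
}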
9 vendor/github.com/docker/distribution/registry/client/transport/http_reader.go generated vendored
@@ -5,6 +5,7 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"os"
 	"regexp"
 	"strconv"
 )
@@ -96,7 +97,7 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
 
 	lastReaderOffset := hrs.readerOffset
 
-	if whence == io.SeekStart && hrs.rc == nil {
+	if whence == os.SEEK_SET && hrs.rc == nil {
 		// If no request has been made yet, and we are seeking to an
 		// absolute position, set the read offset as well to avoid an
 		// unnecessary request.
@@ -112,14 +113,14 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
 	newOffset := hrs.seekOffset
 
 	switch whence {
-	case io.SeekCurrent:
+	case os.SEEK_CUR:
 		newOffset += offset
-	case io.SeekEnd:
+	case os.SEEK_END:
 		if hrs.size < 0 {
 			return 0, errors.New("content length not known")
 		}
 		newOffset = hrs.size + offset
-	case io.SeekStart:
+	case os.SEEK_SET:
 		newOffset = offset
 	}
 
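The whence constants traded in these hunks are equivalent in value: Go 1.7 added io.SeekStart, io.SeekCurrent, and io.SeekEnd and deprecated the os.SEEK_* spellings this version of the file uses (hence the "os" import added above). A runnable illustration:

package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	r := bytes.NewReader([]byte("0123456789"))

	pos, _ := r.Seek(4, io.SeekStart) // absolute offset, like os.SEEK_SET
	fmt.Println(pos)                  // 4

	pos, _ = r.Seek(2, io.SeekCurrent) // relative to current, like os.SEEK_CUR
	fmt.Println(pos)                   // 6

	pos, _ = r.Seek(-1, io.SeekEnd) // relative to end, like os.SEEK_END
	fmt.Println(pos)                // 9
}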
@@ -4,7 +4,6 @@ import (
 	"context"
 
 	"github.com/docker/distribution"
-	prometheus "github.com/docker/distribution/metrics"
 	"github.com/opencontainers/go-digest"
 )
 
@@ -39,11 +38,6 @@ type cachedBlobStatter struct {
 	tracker MetricsTracker
 }
 
-var (
-	// cacheCount is the number of total cache request received/hits/misses
-	cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type")
-)
-
 // NewCachedBlobStatter creates a new statter which prefers a cache and
 // falls back to a backend.
 func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService {
@@ -64,7 +58,6 @@ func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, b
 }
 
 func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) {
-	cacheCount.WithValues("Request").Inc(1)
 	desc, err := cbds.cache.Stat(ctx, dgst)
 	if err != nil {
 		if err != distribution.ErrBlobUnknown {
@@ -73,13 +66,12 @@ func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (di
 
 		goto fallback
 	}
-	cacheCount.WithValues("Hit").Inc(1)
+
 	if cbds.tracker != nil {
 		cbds.tracker.Hit()
 	}
 	return desc, nil
 fallback:
-	cacheCount.WithValues("Miss").Inc(1)
 	if cbds.tracker != nil {
 		cbds.tracker.Miss()
 	}
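Stripped of its Prometheus counters, Stat is a plain cache-aside lookup: try the cache, fall back to the backend on ErrBlobUnknown (the cache repopulation happens further down in the same file). A self-contained sketch of that control flow, with maps standing in for the two BlobDescriptorServices:

package main

import (
	"errors"
	"fmt"
)

// errUnknown stands in for distribution.ErrBlobUnknown.
var errUnknown = errors.New("blob unknown")

// stat consults the cache first and only hits the backend on a miss,
// warming the cache with the result, much like cachedBlobStatter.Stat.
func stat(dgst string, cache, backend map[string]string) (string, error) {
	if desc, ok := cache[dgst]; ok {
		return desc, nil // hit
	}
	desc, ok := backend[dgst]
	if !ok {
		return "", errUnknown
	}
	cache[dgst] = desc // repopulate on miss
	return desc, nil
}

func main() {
	cache := map[string]string{}
	backend := map[string]string{"sha256:aaa": "application/octet-stream"}

	fmt.Println(stat("sha256:aaa", cache, backend)) // miss: served by backend
	fmt.Println(stat("sha256:aaa", cache, backend)) // hit: served by cache
}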
24 vendor/github.com/docker/distribution/vendor.conf generated vendored
@@ -1,36 +1,29 @@
github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b
github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052
github.com/Azure/azure-sdk-for-go 088007b3b08cc02b27f2eadfdcd870958460ce7e
github.com/Azure/go-autorest ec5f4903f77ed9927ac95b19ab8e44ada64c1356
github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4
github.com/aws/aws-sdk-go f831d5a0822a1ad72420ab18c6269bca1ddaf490
github.com/aws/aws-sdk-go c6fc52983ea2375810aa38ddb5370e9cdf611716
github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274
github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702
github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782
github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2
github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04
github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab
github.com/docker/goamz f0a21f5b2e12f83a505ecf79b633bb2035cf6f85
github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21
github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257
github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c
github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3
github.com/gorilla/context 14f550f51af52180c2eefed15e5fd18d63c0a64a
github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b
github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604
github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d
github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f
github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3
github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c
github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39
github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef
github.com/ncw/swift a0320860b16212c2b59b4912bb6508cda1d7cee6
github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564
github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c
github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563
github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd
github.com/Shopify/logrus-bugsnag 577dee27f20dd8f1a529f82210094af593be12bd
github.com/ncw/swift b964f2ca856aac39885e258ad25aec08d5f64ee6
github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064
github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842
github.com/stevvooe/resumable 2aaf90b2ceea5072cb503ef2a620b08ff3119870
github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985
github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e
github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128
@@ -45,7 +38,6 @@ google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2
google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994
gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673
gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b
gopkg.in/yaml.v2 v2.2.1
gopkg.in/yaml.v2 bef53efd0c76e49e6de55ead051f886bea7e9420
rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git
github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882
4 vendor/github.com/docker/docker/contrib/README.md generated vendored Normal file
@@ -0,0 +1,4 @@
The `contrib` directory contains scripts, images, and other helpful things
which are not part of the core docker distribution. Please note that they
could be out of date, since they do not receive the same attention as the
rest of the repository.
10 vendor/github.com/docker/docker/contrib/nnp-test/nnp-test.c generated vendored Normal file
@@ -0,0 +1,10 @@
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>

int main(int argc, char *argv[])
{
	printf("EUID=%d\n", geteuid());
	return 0;
}
16 vendor/github.com/docker/docker/contrib/syscall-test/acct.c generated vendored Normal file
@@ -0,0 +1,16 @@
#define _GNU_SOURCE
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

int main(int argc, char **argv)
{
	int err = acct("/tmp/t");
	if (err == -1) {
		fprintf(stderr, "acct failed: %s\n", strerror(errno));
		exit(EXIT_FAILURE);
	}
	exit(EXIT_SUCCESS);
}
7 vendor/github.com/docker/docker/contrib/syscall-test/exit32.s generated vendored Normal file
@@ -0,0 +1,7 @@
.globl _start
.text
_start:
	xorl %eax, %eax
	incl %eax
	movb $0, %bl
	int $0x80
63 vendor/github.com/docker/docker/contrib/syscall-test/ns.c generated vendored Normal file
@@ -0,0 +1,63 @@
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#define STACK_SIZE (1024 * 1024)	/* Stack size for cloned child */

struct clone_args {
	char **argv;
};

// child_exec is the func that will be executed as the result of clone
static int child_exec(void *stuff)
{
	struct clone_args *args = (struct clone_args *)stuff;
	if (execvp(args->argv[0], args->argv) != 0) {
		fprintf(stderr, "failed to execvp arguments %s\n",
			strerror(errno));
		exit(-1);
	}
	// we should never reach here!
	exit(EXIT_FAILURE);
}

int main(int argc, char **argv)
{
	struct clone_args args;
	args.argv = &argv[1];

	int clone_flags = CLONE_NEWNS | CLONE_NEWPID | SIGCHLD;

	// allocate stack for child
	char *stack;		/* Start of stack buffer */
	char *child_stack;	/* End of stack buffer */
	stack =
	    mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
		 MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0);
	if (stack == MAP_FAILED) {
		fprintf(stderr, "mmap failed: %s\n", strerror(errno));
		exit(EXIT_FAILURE);
	}
	child_stack = stack + STACK_SIZE;	/* Assume stack grows downward */

	// the result of this call is that our child_exec will be run in another
	// process returning its pid
	pid_t pid = clone(child_exec, child_stack, clone_flags, &args);
	if (pid < 0) {
		fprintf(stderr, "clone failed: %s\n", strerror(errno));
		exit(EXIT_FAILURE);
	}
	// lets wait on our child process here before we, the parent, exits
	if (waitpid(pid, NULL, 0) == -1) {
		fprintf(stderr, "failed to wait pid %d\n", pid);
		exit(EXIT_FAILURE);
	}
	exit(EXIT_SUCCESS);
}
14 vendor/github.com/docker/docker/contrib/syscall-test/raw.c generated vendored Normal file
@@ -0,0 +1,14 @@
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/ip.h>
#include <netinet/udp.h>

int main() {
	if (socket(PF_INET, SOCK_RAW, IPPROTO_UDP) == -1) {
		perror("socket");
		return 1;
	}

	return 0;
}
11 vendor/github.com/docker/docker/contrib/syscall-test/setgid.c generated vendored Normal file
@@ -0,0 +1,11 @@
#include <sys/types.h>
#include <unistd.h>
#include <stdio.h>

int main() {
	if (setgid(1) == -1) {
		perror("setgid");
		return 1;
	}
	return 0;
}
Some files were not shown because too many files have changed in this diff.