mirror of https://github.com/containers/skopeo.git
synced 2026-01-31 14:29:03 +00:00

Compare commits (89 commits)

| SHA1 |
|---|
| e96a9b0e1b |
| 08c30b8f06 |
| 05212df1c5 |
| 7ec68dd463 |
| 6eb5131b85 |
| 736cd7641d |
| 78bd5dd3df |
| ecd675e0a6 |
| 5675895460 |
| 0f8f870bd3 |
| a51e38e60d |
| 8fe1595f92 |
| 2497f500d5 |
| afa92d58f6 |
| 958cafb2c0 |
| 1d1bf0d393 |
| 3094320203 |
| 39de98777d |
| 8084f6f4e2 |
| 6ef45e5cf1 |
| 444b90a9cf |
| 72a3dc17ee |
| 88c748f47a |
| 7e8c89d619 |
| 694f915003 |
| a77b409619 |
| 1faff791ce |
| 8b8afe0fda |
| 09a120a59b |
| c769c7789e |
| 3ea3965e5e |
| ee8391db34 |
| e1cc97d9d7 |
| f30756a9bb |
| 33b474b224 |
| 485a7aa330 |
| 59117e6e3d |
| 8ee3ead743 |
| bc39e4f9b6 |
| 3017d87ade |
| d8f1d4572b |
| 41d8dd8b80 |
| bcf3dbbb93 |
| bfc0c5e531 |
| 013ebac8d8 |
| fbc2e4f70f |
| 72468d6817 |
| 5dec940523 |
| 761a6811c1 |
| b3a023f9dd |
| 5aa217fe0d |
| 737438d026 |
| 1715c90841 |
| 187299a20b |
| 89d8bddd9b |
| ba649c56bf |
| 3456577268 |
| b52e700666 |
| ee32f1f7aa |
| 5af0da9de6 |
| 879a6d793f |
| 2734f93e30 |
| 2b97124e4a |
| 7815a5801e |
| 501e1be3cf |
| fc386a6dca |
| 2a134a0ddd |
| 17250d7e8d |
| 65d28709c3 |
| d6c6c78d1b |
| 67ffa00b1d |
| a581847345 |
| bcd26a4ae4 |
| e38c345f23 |
| 0421fb04c2 |
| 82186b916f |
| 15eed5beda |
| 81837bd55b |
| 3dec6a1cdf |
| fe14427129 |
| be27588418 |
| fb84437cd1 |
| d9b495ca38 |
| 6b93d4794f |
| 5d3849a510 |
| fef142f811 |
| 2684e51aa5 |
| e814f9605a |
| 5d136a46ed |
```diff
@@ -15,7 +15,7 @@ that we follow.
 ## Reporting Issues
 
 Before reporting an issue, check our backlog of
-[open issues](https://github.com/projectatomic/skopeo/issues)
+[open issues](https://github.com/containers/skopeo/issues)
 to see if someone else has already reported it. If so, feel free to add
 your scenario, or additional information, to the discussion. Or simply
 "subscribe" to it to be notified when it is updated.
@@ -122,9 +122,9 @@ IRC group on `irc.freenode.net` called `container-projects`
 that has been setup.
 
 For discussions around issues/bugs and features, you can use the github
-[issues](https://github.com/projectatomic/skopeo/issues)
+[issues](https://github.com/containers/skopeo/issues)
 and
-[PRs](https://github.com/projectatomic/skopeo/pulls)
+[PRs](https://github.com/containers/skopeo/pulls)
 tracking system.
 
 <!--
```
```diff
@@ -42,8 +42,9 @@ RUN set -x \
 
 ENV GOPATH /usr/share/gocode:/go
 ENV PATH $GOPATH/bin:/usr/share/gocode/bin:$PATH
-RUN go get github.com/golang/lint/golint
-WORKDIR /go/src/github.com/projectatomic/skopeo
-COPY . /go/src/github.com/projectatomic/skopeo
+RUN go version
+RUN go get golang.org/x/lint/golint
+WORKDIR /go/src/github.com/containers/skopeo
+COPY . /go/src/github.com/containers/skopeo
 
 #ENTRYPOINT ["hack/dind"]
```
```diff
@@ -11,4 +11,4 @@ RUN apt-get update && apt-get install -y \
 	libostree-dev
 
 ENV GOPATH=/
-WORKDIR /src/github.com/projectatomic/skopeo
+WORKDIR /src/github.com/containers/skopeo
```
Makefile

```diff
@@ -1,4 +1,4 @@
-.PHONY: all binary build-container build-local clean install install-binary install-completions shell test-integration
+.PHONY: all binary build-container docs build-local clean install install-binary install-completions shell test-integration vendor
 
 export GO15VENDOREXPERIMENT=1
 
@@ -24,6 +24,7 @@ SIGSTOREDIR=${DESTDIR}/var/lib/atomic/sigstore
 BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
 GO_MD2MAN ?= go-md2man
 GO ?= go
+CONTAINER_RUNTIME := $(shell command -v podman 2> /dev/null || echo docker)
 
 ifeq ($(DEBUG), 1)
 override GOGCFLAGS += -N -l
@@ -34,17 +35,17 @@ ifeq ($(shell go env GOOS), linux)
 endif
 
 GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null)
-DOCKER_IMAGE := skopeo-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH))
+IMAGE := skopeo-dev$(if $(GIT_BRANCH),:$(GIT_BRANCH))
 # set env like gobuildtag?
-DOCKER_FLAGS := docker run --rm -i #$(DOCKER_ENVS)
+CONTAINER_CMD := ${CONTAINER_RUNTIME} run --rm -i -e TESTFLAGS="$(TESTFLAGS)" #$(CONTAINER_ENVS)
 # if this session isn't interactive, then we don't want to allocate a
 # TTY, which would fail, but if it is interactive, we do want to attach
 # so that the user can send e.g. ^C through.
 INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0)
 ifeq ($(INTERACTIVE), 1)
-	DOCKER_FLAGS += -t
+	CONTAINER_CMD += -t
 endif
-DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)"
+CONTAINER_RUN := $(CONTAINER_CMD) "$(IMAGE)"
 
 GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)
 
@@ -55,9 +56,7 @@ LIBDM_BUILD_TAG = $(shell hack/libdm_tag.sh)
 LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG) $(DARWIN_BUILD_TAG)
 BUILDTAGS += $(LOCAL_BUILD_TAGS)
 
-DOCKER_BUILD_IMAGE := skopeobuildimage
 ifeq ($(DISABLE_CGO), 1)
-	override DOCKER_BUILD_IMAGE = golang:1.10
 	override BUILDTAGS = containers_image_ostree_stub exclude_graphdriver_devicemapper exclude_graphdriver_btrfs containers_image_openpgp
 endif
 
@@ -67,23 +66,19 @@ endif
 # use source debugging tools like delve.
 all: binary docs
 
-# Build a docker image (skopeobuild) that has everything we need to build.
+# Build a container image (skopeobuild) that has everything we need to build.
 # Then do the build and the output (skopeo) should appear in current dir
 binary: cmd/skopeo
-ifneq ($(DISABLE_CGO), 1)
-	docker build ${DOCKER_BUILD_ARGS} -f Dockerfile.build -t $(DOCKER_BUILD_IMAGE) .
-endif
-	docker run --rm --security-opt label:disable -v $$(pwd):/src/github.com/projectatomic/skopeo \
-		$(DOCKER_BUILD_IMAGE) make binary-local $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
+	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
+	${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
+		skopeobuildimage make binary-local $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
 
 binary-static: cmd/skopeo
-ifneq ($(DISABLE_CGO), 1)
-	docker build ${DOCKER_BUILD_ARGS} -f Dockerfile.build -t $(DOCKER_BUILD_IMAGE) .
-endif
-	docker run --rm --security-opt label:disable -v $$(pwd):/src/github.com/projectatomic/skopeo \
-		$(DOCKER_BUILD_IMAGE) make binary-local-static $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
+	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
+	${CONTAINER_RUNTIME} run --rm --security-opt label=disable -v $$(pwd):/src/github.com/containers/skopeo \
+		skopeobuildimage make binary-local-static $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
 
-# Build w/o using Docker containers
+# Build w/o using containers
 binary-local:
 	$(GPGME_ENV) $(GO) build ${GO_DYN_FLAGS} -ldflags "-X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo
 
@@ -91,12 +86,11 @@ binary-local-static:
 	$(GPGME_ENV) $(GO) build -ldflags "-extldflags \"-static\" -X main.gitCommit=${GIT_COMMIT}" -gcflags "$(GOGCFLAGS)" -tags "$(BUILDTAGS)" -o skopeo ./cmd/skopeo
 
 build-container:
-	docker build ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" .
+	${CONTAINER_RUNTIME} build ${BUILD_ARGS} -t "$(IMAGE)" .
 
 docs/%.1: docs/%.1.md
 	$(GO_MD2MAN) -in $< -out $@.tmp && touch $@.tmp && mv $@.tmp $@
 
 .PHONY: docs
 docs: $(MANPAGES_MD:%.md=%)
 
 clean:
@@ -122,20 +116,20 @@ install-completions:
 	install -m 644 completions/bash/skopeo ${BASHINSTALLDIR}/skopeo
 
 shell: build-container
-	$(DOCKER_RUN_DOCKER) bash
+	$(CONTAINER_RUN) bash
 
 check: validate test-unit test-integration
 
 # The tests can run out of entropy and block in containers, so replace /dev/random.
 test-integration: build-container
-	$(DOCKER_RUN_DOCKER) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-integration'
+	$(CONTAINER_RUN) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-integration'
 
 test-unit: build-container
 	# Just call (make test unit-local) here instead of worrying about environment differences, e.g. GO15VENDOREXPERIMENT.
-	$(DOCKER_RUN_DOCKER) make test-unit-local BUILDTAGS='$(BUILDTAGS)'
+	$(CONTAINER_RUN) make test-unit-local BUILDTAGS='$(BUILDTAGS)'
 
 validate: build-container
-	$(DOCKER_RUN_DOCKER) hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet
+	$(CONTAINER_RUN) hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet
 
 # This target is only intended for development, e.g. executing it from an IDE. Use (make test) for CI or pre-release testing.
 test-all-local: validate-local test-unit-local
@@ -144,7 +138,7 @@ validate-local:
 	hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet
 
 test-unit-local:
-	$(GPGME_ENV) $(GO) test -tags "$(BUILDTAGS)" $$($(GO) list -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/projectatomic/skopeo/\(integration\|vendor/.*\)$$')
+	$(GPGME_ENV) $(GO) test -tags "$(BUILDTAGS)" $$($(GO) list -tags "$(BUILDTAGS)" -e ./... | grep -v '^github\.com/containers/skopeo/\(integration\|vendor/.*\)$$')
 
 vendor: vendor.conf
 	vndr -whitelist '^github.com/containers/image/docs/.*'
```
README.md

```diff
@@ -1,7 +1,7 @@
-skopeo [](https://travis-ci.org/projectatomic/skopeo)
+skopeo [](https://travis-ci.org/containers/skopeo)
 =
 
-<img src="https://cdn.rawgit.com/projectatomic/skopeo/master/docs/skopeo.svg" width="250">
+<img src="https://cdn.rawgit.com/containers/skopeo/master/docs/skopeo.svg" width="250">
 
 ----
 
@@ -12,7 +12,7 @@ skopeo [
```
```
// contextsFromGlobalOptions returns source and destionation types.SystemContext depending on c.
func contextsFromGlobalOptions(c *cli.Context) (*types.SystemContext, *types.SystemContext, error) {
sourceCtx, err := contextFromGlobalOptions(c, "src-")
if err != nil {
return nil, nil, err
}

destinationCtx, err := contextFromGlobalOptions(c, "dest-")
if err != nil {
return nil, nil, err
}

return sourceCtx, destinationCtx, nil
type copyOptions struct {
global *globalOptions
srcImage *imageOptions
destImage *imageDestOptions
additionalTags cli.StringSlice // For docker-archive: destinations, in addition to the name:tag specified as destination, also add these
removeSignatures bool // Do not copy signatures from the source image
signByFingerprint string // Sign the image using a GPG key with the specified fingerprint
format optionalString // Force conversion of the image to a specified format
}

func copyHandler(c *cli.Context) error {
if len(c.Args()) != 2 {
cli.ShowCommandHelp(c, "copy")
return errors.New("Exactly two arguments expected")
func copyCmd(global *globalOptions) cli.Command {
sharedFlags, sharedOpts := sharedImageFlags()
srcFlags, srcOpts := imageFlags(global, sharedOpts, "src-", "screds")
destFlags, destOpts := imageDestFlags(global, sharedOpts, "dest-", "dcreds")
opts := copyOptions{global: global,
srcImage: srcOpts,
destImage: destOpts,
}

policyContext, err := getPolicyContext(c)
if err != nil {
return fmt.Errorf("Error loading trust policy: %v", err)
}
defer policyContext.Destroy()

srcRef, err := alltransports.ParseImageName(c.Args()[0])
if err != nil {
return fmt.Errorf("Invalid source name %s: %v", c.Args()[0], err)
}
destRef, err := alltransports.ParseImageName(c.Args()[1])
if err != nil {
return fmt.Errorf("Invalid destination name %s: %v", c.Args()[1], err)
}
signBy := c.String("sign-by")
removeSignatures := c.Bool("remove-signatures")

sourceCtx, destinationCtx, err := contextsFromGlobalOptions(c)
if err != nil {
return err
}

var manifestType string
if c.IsSet("format") {
switch c.String("format") {
case "oci":
manifestType = imgspecv1.MediaTypeImageManifest
case "v2s1":
manifestType = manifest.DockerV2Schema1SignedMediaType
case "v2s2":
manifestType = manifest.DockerV2Schema2MediaType
default:
return fmt.Errorf("unknown format %q. Choose on of the supported formats: 'oci', 'v2s1', or 'v2s2'", c.String("format"))
}
}

if c.IsSet("additional-tag") {
for _, image := range c.StringSlice("additional-tag") {
ref, err := reference.ParseNormalizedNamed(image)
if err != nil {
return fmt.Errorf("error parsing additional-tag '%s': %v", image, err)
}
namedTagged, isNamedTagged := ref.(reference.NamedTagged)
if !isNamedTagged {
return fmt.Errorf("additional-tag '%s' must be a tagged reference", image)
}
destinationCtx.DockerArchiveAdditionalTags = append(destinationCtx.DockerArchiveAdditionalTags, namedTagged)
}
}

return copy.Image(context.Background(), policyContext, destRef, srcRef, &copy.Options{
RemoveSignatures: removeSignatures,
SignBy: signBy,
ReportWriter: os.Stdout,
SourceCtx: sourceCtx,
DestinationCtx: destinationCtx,
ForceManifestMIMEType: manifestType,
})
}

var copyCmd = cli.Command{
Name: "copy",
Usage: "Copy an IMAGE-NAME from one location to another",
Description: fmt.Sprintf(`
return cli.Command{
Name: "copy",
Usage: "Copy an IMAGE-NAME from one location to another",
Description: fmt.Sprintf(`

Container "IMAGE-NAME" uses a "transport":"details" format.

@@ -110,86 +46,99 @@ var copyCmd = cli.Command{

See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
ArgsUsage: "SOURCE-IMAGE DESTINATION-IMAGE",
Action: copyHandler,
// FIXME: Do we need to namespace the GPG aspect?
Flags: []cli.Flag{
cli.StringSliceFlag{
Name: "additional-tag",
Usage: "additional tags (supports docker-archive)",
},
cli.StringFlag{
Name: "authfile",
Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
},
cli.BoolFlag{
Name: "remove-signatures",
Usage: "Do not copy signatures from SOURCE-IMAGE",
},
cli.StringFlag{
Name: "sign-by",
Usage: "Sign the image using a GPG key with the specified `FINGERPRINT`",
},
cli.StringFlag{
Name: "src-creds, screds",
Value: "",
Usage: "Use `USERNAME[:PASSWORD]` for accessing the source registry",
},
cli.StringFlag{
Name: "dest-creds, dcreds",
Value: "",
Usage: "Use `USERNAME[:PASSWORD]` for accessing the destination registry",
},
cli.StringFlag{
Name: "src-cert-dir",
Value: "",
Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the source registry or daemon",
},
cli.BoolTFlag{
Name: "src-tls-verify",
Usage: "require HTTPS and verify certificates when talking to the container source registry or daemon (defaults to true)",
},
cli.StringFlag{
Name: "dest-cert-dir",
Value: "",
Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the destination registry or daemon",
},
cli.BoolTFlag{
Name: "dest-tls-verify",
Usage: "require HTTPS and verify certificates when talking to the container destination registry or daemon (defaults to true)",
},
cli.StringFlag{
Name: "dest-ostree-tmp-dir",
Value: "",
Usage: "`DIRECTORY` to use for OSTree temporary files",
},
cli.StringFlag{
Name: "src-shared-blob-dir",
Value: "",
Usage: "`DIRECTORY` to use to fetch retrieved blobs (OCI layout sources only)",
},
cli.StringFlag{
Name: "dest-shared-blob-dir",
Value: "",
Usage: "`DIRECTORY` to use to store retrieved blobs (OCI layout destinations only)",
},
cli.StringFlag{
Name: "format, f",
Usage: "`MANIFEST TYPE` (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)",
},
cli.BoolFlag{
Name: "dest-compress",
Usage: "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)",
},
cli.StringFlag{
Name: "src-daemon-host",
Value: "",
Usage: "use docker daemon host at `HOST` (docker-daemon sources only)",
},
cli.StringFlag{
Name: "dest-daemon-host",
Value: "",
Usage: "use docker daemon host at `HOST` (docker-daemon destinations only)",
},
},
ArgsUsage: "SOURCE-IMAGE DESTINATION-IMAGE",
Action: commandAction(opts.run),
// FIXME: Do we need to namespace the GPG aspect?
Flags: append(append(append([]cli.Flag{
cli.StringSliceFlag{
Name: "additional-tag",
Usage: "additional tags (supports docker-archive)",
Value: &opts.additionalTags, // Surprisingly StringSliceFlag does not support Destination:, but modifies Value: in place.
},
cli.BoolFlag{
Name: "remove-signatures",
Usage: "Do not copy signatures from SOURCE-IMAGE",
Destination: &opts.removeSignatures,
},
cli.StringFlag{
Name: "sign-by",
Usage: "Sign the image using a GPG key with the specified `FINGERPRINT`",
Destination: &opts.signByFingerprint,
},
cli.GenericFlag{
Name: "format, f",
Usage: "`MANIFEST TYPE` (oci, v2s1, or v2s2) to use when saving image to directory using the 'dir:' transport (default is manifest type of source)",
Value: newOptionalStringValue(&opts.format),
},
}, sharedFlags...), srcFlags...), destFlags...),
}
}

func (opts *copyOptions) run(args []string, stdout io.Writer) error {
if len(args) != 2 {
return errorShouldDisplayUsage{errors.New("Exactly two arguments expected")}
}

policyContext, err := opts.global.getPolicyContext()
if err != nil {
return fmt.Errorf("Error loading trust policy: %v", err)
}
defer policyContext.Destroy()

srcRef, err := alltransports.ParseImageName(args[0])
if err != nil {
return fmt.Errorf("Invalid source name %s: %v", args[0], err)
}
destRef, err := alltransports.ParseImageName(args[1])
if err != nil {
return fmt.Errorf("Invalid destination name %s: %v", args[1], err)
}

sourceCtx, err := opts.srcImage.newSystemContext()
if err != nil {
return err
}
destinationCtx, err := opts.destImage.newSystemContext()
if err != nil {
return err
}

var manifestType string
if opts.format.present {
switch opts.format.value {
case "oci":
manifestType = imgspecv1.MediaTypeImageManifest
case "v2s1":
manifestType = manifest.DockerV2Schema1SignedMediaType
case "v2s2":
manifestType = manifest.DockerV2Schema2MediaType
default:
return fmt.Errorf("unknown format %q. Choose one of the supported formats: 'oci', 'v2s1', or 'v2s2'", opts.format.value)
}
}

for _, image := range opts.additionalTags {
ref, err := reference.ParseNormalizedNamed(image)
if err != nil {
return fmt.Errorf("error parsing additional-tag '%s': %v", image, err)
}
namedTagged, isNamedTagged := ref.(reference.NamedTagged)
if !isNamedTagged {
return fmt.Errorf("additional-tag '%s' must be a tagged reference", image)
}
destinationCtx.DockerArchiveAdditionalTags = append(destinationCtx.DockerArchiveAdditionalTags, namedTagged)
}

ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()

_, err = copy.Image(ctx, policyContext, destRef, srcRef, &copy.Options{
RemoveSignatures: opts.removeSignatures,
SignBy: opts.signByFingerprint,
ReportWriter: stdout,
SourceCtx: sourceCtx,
DestinationCtx: destinationCtx,
ForceManifestMIMEType: manifestType,
})
return err
}
```
```
@@ -1,9 +1,9 @@
package main

import (
"context"
"errors"
"fmt"
"io"
"strings"

"github.com/containers/image/transports"
@@ -11,27 +11,22 @@ import (
"github.com/urfave/cli"
)

func deleteHandler(c *cli.Context) error {
if len(c.Args()) != 1 {
return errors.New("Usage: delete imageReference")
}

ref, err := alltransports.ParseImageName(c.Args()[0])
if err != nil {
return fmt.Errorf("Invalid source name %s: %v", c.Args()[0], err)
}

sys, err := contextFromGlobalOptions(c, "")
if err != nil {
return err
}
return ref.DeleteImage(context.Background(), sys)
type deleteOptions struct {
global *globalOptions
image *imageOptions
}

var deleteCmd = cli.Command{
Name: "delete",
Usage: "Delete image IMAGE-NAME",
Description: fmt.Sprintf(`
func deleteCmd(global *globalOptions) cli.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
opts := deleteOptions{
global: global,
image: imageOpts,
}
return cli.Command{
Name: "delete",
Usage: "Delete image IMAGE-NAME",
Description: fmt.Sprintf(`
Delete an "IMAGE_NAME" from a transport

Supported transports:
@@ -39,26 +34,28 @@ var deleteCmd = cli.Command{

See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
ArgsUsage: "IMAGE-NAME",
Action: deleteHandler,
Flags: []cli.Flag{
cli.StringFlag{
Name: "authfile",
Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
},
cli.StringFlag{
Name: "creds",
Value: "",
Usage: "Use `USERNAME[:PASSWORD]` for accessing the registry",
},
cli.StringFlag{
Name: "cert-dir",
Value: "",
Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry",
},
cli.BoolTFlag{
Name: "tls-verify",
Usage: "require HTTPS and verify certificates when talking to container registries (defaults to true)",
},
},
ArgsUsage: "IMAGE-NAME",
Action: commandAction(opts.run),
Flags: append(sharedFlags, imageFlags...),
}
}

func (opts *deleteOptions) run(args []string, stdout io.Writer) error {
if len(args) != 1 {
return errors.New("Usage: delete imageReference")
}

ref, err := alltransports.ParseImageName(args[0])
if err != nil {
return fmt.Errorf("Invalid source name %s: %v", args[0], err)
}

sys, err := opts.image.newSystemContext()
if err != nil {
return err
}

ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()
return ref.DeleteImage(ctx, sys)
}
```
cmd/skopeo/flag.go (new file)

```go
@@ -0,0 +1,75 @@
package main

import (
"strconv"

"github.com/urfave/cli"
)

// optionalBool is a boolean with a separate presence flag.
type optionalBool struct {
present bool
value bool
}

// optionalBool is a cli.Generic == flag.Value implementation equivalent to
// the one underlying flag.Bool, except that it records whether the flag has been set.
// This is distinct from optionalBool to (pretend to) force callers to use
// newOptionalBool
type optionalBoolValue optionalBool

func newOptionalBoolValue(p *optionalBool) cli.Generic {
p.present = false
return (*optionalBoolValue)(p)
}

func (ob *optionalBoolValue) Set(s string) error {
v, err := strconv.ParseBool(s)
if err != nil {
return err
}
ob.value = v
ob.present = true
return nil
}

func (ob *optionalBoolValue) String() string {
if !ob.present {
return "" // This is, sadly, not round-trip safe: --flag is interpreted as --flag=true
}
return strconv.FormatBool(ob.value)
}

func (ob *optionalBoolValue) IsBoolFlag() bool {
return true
}

// optionalString is a string with a separate presence flag.
type optionalString struct {
present bool
value string
}

// optionalString is a cli.Generic == flag.Value implementation equivalent to
// the one underlying flag.String, except that it records whether the flag has been set.
// This is distinct from optionalString to (pretend to) force callers to use
// newoptionalString
type optionalStringValue optionalString

func newOptionalStringValue(p *optionalString) cli.Generic {
p.present = false
return (*optionalStringValue)(p)
}

func (ob *optionalStringValue) Set(s string) error {
ob.value = s
ob.present = true
return nil
}

func (ob *optionalStringValue) String() string {
if !ob.present {
return "" // This is, sadly, not round-trip safe: --flag= is interpreted as {present:true, value:""}
}
return ob.value
}
```
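These helpers exist so a flag can distinguish "not given on the command line" from "explicitly set to the default value". A minimal sketch of how such a value is wired into urfave/cli, assuming the optionalBool and newOptionalBoolValue definitions above (the flag name and messages are illustrative, not taken from the diff):

```go
package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli"
)

// Sketch only: register an optionalBool as a cli.GenericFlag so the handler
// can tell "--tls-verify absent" apart from "--tls-verify=false".
func exampleApp() *cli.App {
	var tlsVerify optionalBool // zero value: present == false

	app := cli.NewApp()
	app.Name = "example"
	app.Flags = []cli.Flag{
		cli.GenericFlag{
			Name:  "tls-verify",
			Usage: "require HTTPS and verify certificates",
			Value: newOptionalBoolValue(&tlsVerify),
		},
	}
	app.Action = func(*cli.Context) error {
		switch {
		case !tlsVerify.present:
			fmt.Println("tls-verify not specified, keeping the default")
		case tlsVerify.value:
			fmt.Println("tls-verify explicitly enabled")
		default:
			fmt.Println("tls-verify explicitly disabled")
		}
		return nil
	}
	return app
}

func main() {
	if err := exampleApp().Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```

This is the same pattern the main.go changes further down use for the hidden global --tls-verify flag.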
cmd/skopeo/flag_test.go (new file)

```go
@@ -0,0 +1,239 @@
package main

import (
"testing"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/urfave/cli"
)

func TestOptionalBoolSet(t *testing.T) {
for _, c := range []struct {
input string
accepted bool
value bool
}{
// Valid inputs documented for strconv.ParseBool == flag.BoolVar
{"1", true, true},
{"t", true, true},
{"T", true, true},
{"TRUE", true, true},
{"true", true, true},
{"True", true, true},
{"0", true, false},
{"f", true, false},
{"F", true, false},
{"FALSE", true, false},
{"false", true, false},
{"False", true, false},
// A few invalid inputs
{"", false, false},
{"yes", false, false},
{"no", false, false},
{"2", false, false},
} {
var ob optionalBool
v := newOptionalBoolValue(&ob)
require.False(t, ob.present)
err := v.Set(c.input)
if c.accepted {
assert.NoError(t, err, c.input)
assert.Equal(t, c.value, ob.value)
} else {
assert.Error(t, err, c.input)
assert.False(t, ob.present) // Just to be extra paranoid.
}
}

// Nothing actually explicitly says that .Set() is never called when the flag is not present on the command line;
// so, check that it is not being called, at least in the straightforward case (it's not possible to test that it
// is not called in any possible situation).
var globalOB, commandOB optionalBool
actionRun := false
app := cli.NewApp()
app.EnableBashCompletion = true
app.Flags = []cli.Flag{
cli.GenericFlag{
Name: "global-OB",
Value: newOptionalBoolValue(&globalOB),
},
}
app.Commands = []cli.Command{{
Name: "cmd",
Flags: []cli.Flag{
cli.GenericFlag{
Name: "command-OB",
Value: newOptionalBoolValue(&commandOB),
},
},
Action: func(*cli.Context) error {
assert.False(t, globalOB.present)
assert.False(t, commandOB.present)
actionRun = true
return nil
},
}}
err := app.Run([]string{"app", "cmd"})
require.NoError(t, err)
assert.True(t, actionRun)
}

func TestOptionalBoolString(t *testing.T) {
for _, c := range []struct {
input optionalBool
expected string
}{
{optionalBool{present: true, value: true}, "true"},
{optionalBool{present: true, value: false}, "false"},
{optionalBool{present: false, value: true}, ""},
{optionalBool{present: false, value: false}, ""},
} {
var ob optionalBool
v := newOptionalBoolValue(&ob)
ob = c.input
res := v.String()
assert.Equal(t, c.expected, res)
}
}

func TestOptionalBoolIsBoolFlag(t *testing.T) {
// IsBoolFlag means that the argument value must either be part of the same argument, with =;
// if there is no =, the value is set to true.
// This differs form other flags, where the argument is required and may be either separated with = or supplied in the next argument.
for _, c := range []struct {
input []string
expectedOB optionalBool
expectedArgs []string
}{
{[]string{"1", "2"}, optionalBool{present: false}, []string{"1", "2"}}, // Flag not present
{[]string{"--OB=true", "1", "2"}, optionalBool{present: true, value: true}, []string{"1", "2"}}, // --OB=true
{[]string{"--OB=false", "1", "2"}, optionalBool{present: true, value: false}, []string{"1", "2"}}, // --OB=false
{[]string{"--OB", "true", "1", "2"}, optionalBool{present: true, value: true}, []string{"true", "1", "2"}}, // --OB true
{[]string{"--OB", "false", "1", "2"}, optionalBool{present: true, value: true}, []string{"false", "1", "2"}}, // --OB false
} {
var ob optionalBool
actionRun := false
app := cli.NewApp()
app.Commands = []cli.Command{{
Name: "cmd",
Flags: []cli.Flag{
cli.GenericFlag{
Name: "OB",
Value: newOptionalBoolValue(&ob),
},
},
Action: func(ctx *cli.Context) error {
assert.Equal(t, c.expectedOB, ob)
assert.Equal(t, c.expectedArgs, ([]string)(ctx.Args()))
actionRun = true
return nil
},
}}
err := app.Run(append([]string{"app", "cmd"}, c.input...))
require.NoError(t, err)
assert.True(t, actionRun)
}
}

func TestOptionalStringSet(t *testing.T) {
// Really just a smoke test, but differentiating between not present and empty.
for _, c := range []string{"", "hello"} {
var os optionalString
v := newOptionalStringValue(&os)
require.False(t, os.present)
err := v.Set(c)
assert.NoError(t, err, c)
assert.Equal(t, c, os.value)
}

// Nothing actually explicitly says that .Set() is never called when the flag is not present on the command line;
// so, check that it is not being called, at least in the straightforward case (it's not possible to test that it
// is not called in any possible situation).
var globalOS, commandOS optionalString
actionRun := false
app := cli.NewApp()
app.EnableBashCompletion = true
app.Flags = []cli.Flag{
cli.GenericFlag{
Name: "global-OS",
Value: newOptionalStringValue(&globalOS),
},
}
app.Commands = []cli.Command{{
Name: "cmd",
Flags: []cli.Flag{
cli.GenericFlag{
Name: "command-OS",
Value: newOptionalStringValue(&commandOS),
},
},
Action: func(*cli.Context) error {
assert.False(t, globalOS.present)
assert.False(t, commandOS.present)
actionRun = true
return nil
},
}}
err := app.Run([]string{"app", "cmd"})
require.NoError(t, err)
assert.True(t, actionRun)
}

func TestOptionalStringString(t *testing.T) {
for _, c := range []struct {
input optionalString
expected string
}{
{optionalString{present: true, value: "hello"}, "hello"},
{optionalString{present: true, value: ""}, ""},
{optionalString{present: false, value: "hello"}, ""},
{optionalString{present: false, value: ""}, ""},
} {
var os optionalString
v := newOptionalStringValue(&os)
os = c.input
res := v.String()
assert.Equal(t, c.expected, res)
}
}

func TestOptionalStringIsBoolFlag(t *testing.T) {
// NOTE: optionalStringValue does not implement IsBoolFlag!
// IsBoolFlag means that the argument value must either be part of the same argument, with =;
// if there is no =, the value is set to true.
// This differs form other flags, where the argument is required and may be either separated with = or supplied in the next argument.
for _, c := range []struct {
input []string
expectedOS optionalString
expectedArgs []string
}{
{[]string{"1", "2"}, optionalString{present: false}, []string{"1", "2"}}, // Flag not present
{[]string{"--OS=hello", "1", "2"}, optionalString{present: true, value: "hello"}, []string{"1", "2"}}, // --OS=true
{[]string{"--OS=", "1", "2"}, optionalString{present: true, value: ""}, []string{"1", "2"}}, // --OS=false
{[]string{"--OS", "hello", "1", "2"}, optionalString{present: true, value: "hello"}, []string{"1", "2"}}, // --OS true
{[]string{"--OS", "", "1", "2"}, optionalString{present: true, value: ""}, []string{"1", "2"}}, // --OS false
} {
var os optionalString
actionRun := false
app := cli.NewApp()
app.Commands = []cli.Command{{
Name: "cmd",
Flags: []cli.Flag{
cli.GenericFlag{
Name: "OS",
Value: newOptionalStringValue(&os),
},
},
Action: func(ctx *cli.Context) error {
assert.Equal(t, c.expectedOS, os)
assert.Equal(t, c.expectedArgs, ([]string)(ctx.Args()))
actionRun = true
return nil
},
}}
err := app.Run(append([]string{"app", "cmd"}, c.input...))
require.NoError(t, err)
assert.True(t, actionRun)
}
}
```
```
@@ -1,9 +1,9 @@
package main

import (
"context"
"encoding/json"
"fmt"
"io"
"strings"
"time"

@@ -30,10 +30,23 @@ type inspectOutput struct {
Layers []string
}

var inspectCmd = cli.Command{
Name: "inspect",
Usage: "Inspect image IMAGE-NAME",
Description: fmt.Sprintf(`
type inspectOptions struct {
global *globalOptions
image *imageOptions
raw bool // Output the raw manifest instead of parsing information about the image
}

func inspectCmd(global *globalOptions) cli.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
opts := inspectOptions{
global: global,
image: imageOpts,
}
return cli.Command{
Name: "inspect",
Usage: "Inspect image IMAGE-NAME",
Description: fmt.Sprintf(`
Return low-level information about "IMAGE-NAME" in a registry/transport

Supported transports:
@@ -41,94 +54,90 @@ var inspectCmd = cli.Command{

See skopeo(1) section "IMAGE NAMES" for the expected format
`, strings.Join(transports.ListNames(), ", ")),
ArgsUsage: "IMAGE-NAME",
Flags: []cli.Flag{
cli.StringFlag{
Name: "authfile",
Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
},
cli.StringFlag{
Name: "cert-dir",
Value: "",
Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry",
},
cli.BoolTFlag{
Name: "tls-verify",
Usage: "require HTTPS and verify certificates when talking to container registries (defaults to true)",
},
cli.BoolFlag{
Name: "raw",
Usage: "output raw manifest",
},
cli.StringFlag{
Name: "creds",
Value: "",
Usage: "Use `USERNAME[:PASSWORD]` for accessing the registry",
},
},
Action: func(c *cli.Context) (retErr error) {
ctx := context.Background()

img, err := parseImage(ctx, c)
if err != nil {
return err
}

defer func() {
if err := img.Close(); err != nil {
retErr = errors.Wrapf(retErr, fmt.Sprintf("(could not close image: %v) ", err))
}
}()

rawManifest, _, err := img.Manifest(ctx)
if err != nil {
return err
}
if c.Bool("raw") {
_, err := c.App.Writer.Write(rawManifest)
if err != nil {
return fmt.Errorf("Error writing manifest to standard output: %v", err)
}
return nil
}
imgInspect, err := img.Inspect(ctx)
if err != nil {
return err
}
outputData := inspectOutput{
Name: "", // Possibly overridden for a docker.Image.
Tag: imgInspect.Tag,
// Digest is set below.
RepoTags: []string{}, // Possibly overriden for a docker.Image.
Created: imgInspect.Created,
DockerVersion: imgInspect.DockerVersion,
Labels: imgInspect.Labels,
Architecture: imgInspect.Architecture,
Os: imgInspect.Os,
Layers: imgInspect.Layers,
}
outputData.Digest, err = manifest.Digest(rawManifest)
if err != nil {
return fmt.Errorf("Error computing manifest digest: %v", err)
}
if dockerImg, ok := img.(*docker.Image); ok {
outputData.Name = dockerImg.SourceRefFullName()
outputData.RepoTags, err = dockerImg.GetRepositoryTags(ctx)
if err != nil {
// some registries may decide to block the "list all tags" endpoint
// gracefully allow the inspect to continue in this case. Currently
// the IBM Bluemix container registry has this restriction.
if !strings.Contains(err.Error(), "401") {
return fmt.Errorf("Error determining repository tags: %v", err)
}
logrus.Warnf("Registry disallows tag list retrieval; skipping")
}
}
out, err := json.MarshalIndent(outputData, "", " ")
if err != nil {
return err
}
fmt.Fprintln(c.App.Writer, string(out))
return nil
},
ArgsUsage: "IMAGE-NAME",
Flags: append(append([]cli.Flag{
cli.BoolFlag{
Name: "raw",
Usage: "output raw manifest",
Destination: &opts.raw,
},
}, sharedFlags...), imageFlags...),
Action: commandAction(opts.run),
}
}

func (opts *inspectOptions) run(args []string, stdout io.Writer) (retErr error) {
ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()

if len(args) != 1 {
return errors.New("Exactly one argument expected")
}
img, err := parseImage(ctx, opts.image, args[0])
if err != nil {
return err
}

defer func() {
if err := img.Close(); err != nil {
retErr = errors.Wrapf(retErr, fmt.Sprintf("(could not close image: %v) ", err))
}
}()

rawManifest, _, err := img.Manifest(ctx)
if err != nil {
return err
}
if opts.raw {
_, err := stdout.Write(rawManifest)
if err != nil {
return fmt.Errorf("Error writing manifest to standard output: %v", err)
}
return nil
}
imgInspect, err := img.Inspect(ctx)
if err != nil {
return err
}
outputData := inspectOutput{
Name: "", // Set below if DockerReference() is known
Tag: imgInspect.Tag,
// Digest is set below.
RepoTags: []string{}, // Possibly overriden for docker.Transport.
Created: imgInspect.Created,
DockerVersion: imgInspect.DockerVersion,
Labels: imgInspect.Labels,
Architecture: imgInspect.Architecture,
Os: imgInspect.Os,
Layers: imgInspect.Layers,
}
outputData.Digest, err = manifest.Digest(rawManifest)
if err != nil {
return fmt.Errorf("Error computing manifest digest: %v", err)
}
if dockerRef := img.Reference().DockerReference(); dockerRef != nil {
outputData.Name = dockerRef.Name()
}
if img.Reference().Transport() == docker.Transport {
sys, err := opts.image.newSystemContext()
if err != nil {
return err
}
outputData.RepoTags, err = docker.GetRepositoryTags(ctx, sys, img.Reference())
if err != nil {
// some registries may decide to block the "list all tags" endpoint
// gracefully allow the inspect to continue in this case. Currently
// the IBM Bluemix container registry has this restriction.
if !strings.Contains(err.Error(), "401") {
return fmt.Errorf("Error determining repository tags: %v", err)
}
logrus.Warnf("Registry disallows tag list retrieval; skipping")
}
}
out, err := json.MarshalIndent(outputData, "", " ")
if err != nil {
return err
}
fmt.Fprintf(stdout, "%s\n", string(out))
return nil
}
```
```
@@ -1,126 +1,145 @@
package main

import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"strings"

"github.com/containers/image/directory"
"github.com/containers/image/image"
"github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/urfave/cli"
)

var layersCmd = cli.Command{
Name: "layers",
Usage: "Get layers of IMAGE-NAME",
ArgsUsage: "IMAGE-NAME [LAYER...]",
Hidden: true,
Action: func(c *cli.Context) (retErr error) {
fmt.Fprintln(os.Stderr, `DEPRECATED: skopeo layers is deprecated in favor of skopeo copy`)
if c.NArg() == 0 {
return errors.New("Usage: layers imageReference [layer...]")
type layersOptions struct {
global *globalOptions
image *imageOptions
}

func layersCmd(global *globalOptions) cli.Command {
sharedFlags, sharedOpts := sharedImageFlags()
imageFlags, imageOpts := imageFlags(global, sharedOpts, "", "")
opts := layersOptions{
global: global,
image: imageOpts,
}
return cli.Command{
Name: "layers",
Usage: "Get layers of IMAGE-NAME",
ArgsUsage: "IMAGE-NAME [LAYER...]",
Hidden: true,
Action: commandAction(opts.run),
Flags: append(sharedFlags, imageFlags...),
}
}

func (opts *layersOptions) run(args []string, stdout io.Writer) (retErr error) {
fmt.Fprintln(os.Stderr, `DEPRECATED: skopeo layers is deprecated in favor of skopeo copy`)
if len(args) == 0 {
return errors.New("Usage: layers imageReference [layer...]")
}

ctx, cancel := opts.global.commandTimeoutContext()
defer cancel()

sys, err := opts.image.newSystemContext()
if err != nil {
return err
}
cache := blobinfocache.DefaultCache(sys)
rawSource, err := parseImageSource(ctx, opts.image, args[0])
if err != nil {
return err
}
src, err := image.FromSource(ctx, sys, rawSource)
if err != nil {
if closeErr := rawSource.Close(); closeErr != nil {
return errors.Wrapf(err, " (close error: %v)", closeErr)
}

ctx := context.Background()
return err
}
defer func() {
if err := src.Close(); err != nil {
retErr = errors.Wrapf(retErr, " (close error: %v)", err)
}
}()

sys, err := contextFromGlobalOptions(c, "")
type blobDigest struct {
digest digest.Digest
isConfig bool
}
var blobDigests []blobDigest
for _, dString := range args[1:] {
if !strings.HasPrefix(dString, "sha256:") {
dString = "sha256:" + dString
}
d, err := digest.Parse(dString)
if err != nil {
return err
}
rawSource, err := parseImageSource(ctx, c, c.Args()[0])
blobDigests = append(blobDigests, blobDigest{digest: d, isConfig: false})
}

if len(blobDigests) == 0 {
layers := src.LayerInfos()
seenLayers := map[digest.Digest]struct{}{}
for _, info := range layers {
if _, ok := seenLayers[info.Digest]; !ok {
blobDigests = append(blobDigests, blobDigest{digest: info.Digest, isConfig: false})
seenLayers[info.Digest] = struct{}{}
}
}
configInfo := src.ConfigInfo()
if configInfo.Digest != "" {
blobDigests = append(blobDigests, blobDigest{digest: configInfo.Digest, isConfig: true})
}
}

tmpDir, err := ioutil.TempDir(".", "layers-")
if err != nil {
return err
}
tmpDirRef, err := directory.NewReference(tmpDir)
if err != nil {
return err
}
dest, err := tmpDirRef.NewImageDestination(ctx, nil)
if err != nil {
return err
}

defer func() {
if err := dest.Close(); err != nil {
retErr = errors.Wrapf(retErr, " (close error: %v)", err)
}
}()

for _, bd := range blobDigests {
r, blobSize, err := rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1}, cache)
if err != nil {
return err
}
src, err := image.FromSource(ctx, sys, rawSource)
if err != nil {
if closeErr := rawSource.Close(); closeErr != nil {
if _, err := dest.PutBlob(ctx, r, types.BlobInfo{Digest: bd.digest, Size: blobSize}, cache, bd.isConfig); err != nil {
if closeErr := r.Close(); closeErr != nil {
return errors.Wrapf(err, " (close error: %v)", closeErr)
}

return err
}
defer func() {
if err := src.Close(); err != nil {
retErr = errors.Wrapf(retErr, " (close error: %v)", err)
}
}()
}

type blobDigest struct {
digest digest.Digest
isConfig bool
}
var blobDigests []blobDigest
for _, dString := range c.Args().Tail() {
if !strings.HasPrefix(dString, "sha256:") {
dString = "sha256:" + dString
}
d, err := digest.Parse(dString)
if err != nil {
return err
}
blobDigests = append(blobDigests, blobDigest{digest: d, isConfig: false})
}
manifest, _, err := src.Manifest(ctx)
if err != nil {
return err
}
if err := dest.PutManifest(ctx, manifest); err != nil {
return err
}

if len(blobDigests) == 0 {
layers := src.LayerInfos()
seenLayers := map[digest.Digest]struct{}{}
for _, info := range layers {
if _, ok := seenLayers[info.Digest]; !ok {
blobDigests = append(blobDigests, blobDigest{digest: info.Digest, isConfig: false})
seenLayers[info.Digest] = struct{}{}
}
}
configInfo := src.ConfigInfo()
if configInfo.Digest != "" {
blobDigests = append(blobDigests, blobDigest{digest: configInfo.Digest, isConfig: true})
}
}

tmpDir, err := ioutil.TempDir(".", "layers-")
if err != nil {
return err
}
tmpDirRef, err := directory.NewReference(tmpDir)
if err != nil {
return err
}
dest, err := tmpDirRef.NewImageDestination(ctx, nil)
if err != nil {
return err
}

defer func() {
if err := dest.Close(); err != nil {
retErr = errors.Wrapf(retErr, " (close error: %v)", err)
}
}()

for _, bd := range blobDigests {
r, blobSize, err := rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1})
if err != nil {
return err
}
if _, err := dest.PutBlob(ctx, r, types.BlobInfo{Digest: bd.digest, Size: blobSize}, bd.isConfig); err != nil {
if closeErr := r.Close(); closeErr != nil {
return errors.Wrapf(err, " (close error: %v)", closeErr)
}
return err
}
}

manifest, _, err := src.Manifest(ctx)
if err != nil {
return err
}
if err := dest.PutManifest(ctx, manifest); err != nil {
return err
}

return dest.Commit(ctx)
},
return dest.Commit(ctx)
}
```
```
@@ -1,12 +1,14 @@
package main

import (
"context"
"fmt"
"os"
"time"

"github.com/containers/image/signature"
"github.com/containers/skopeo/version"
"github.com/containers/storage/pkg/reexec"
"github.com/projectatomic/skopeo/version"
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
)
@@ -15,8 +17,21 @@ import (
// and will be populated by the Makefile
var gitCommit = ""

// createApp returns a cli.App to be run or tested.
func createApp() *cli.App {
type globalOptions struct {
debug bool // Enable debug output
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
policyPath string // Path to a signature verification policy file
insecurePolicy bool // Use an "allow everything" signature verification policy
registriesDirPath string // Path to a "registries.d" registry configuratio directory
overrideArch string // Architecture to use for choosing images, instead of the runtime one
overrideOS string // OS to use for choosing images, instead of the runtime one
commandTimeout time.Duration // Timeout for the command execution
}

// createApp returns a cli.App, and the underlying globalOptions object, to be run or tested.
func createApp() (*cli.App, *globalOptions) {
opts := globalOptions{}

app := cli.NewApp()
app.EnableBashCompletion = true
app.Name = "skopeo"
@@ -28,85 +43,106 @@ func createApp() *cli.App {
app.Usage = "Various operations with container images and container image registries"
app.Flags = []cli.Flag{
cli.BoolFlag{
Name: "debug",
Usage: "enable debug output",
Name: "debug",
Usage: "enable debug output",
Destination: &opts.debug,
},
cli.BoolTFlag{
cli.GenericFlag{
Name: "tls-verify",
Usage: "require HTTPS and verify certificates when talking to container registries (defaults to true)",
Hidden: true,
Value: newOptionalBoolValue(&opts.tlsVerify),
},
cli.StringFlag{
Name: "policy",
Value: "",
Usage: "Path to a trust policy file",
Name: "policy",
Usage: "Path to a trust policy file",
Destination: &opts.policyPath,
},
cli.BoolFlag{
Name: "insecure-policy",
Usage: "run the tool without any policy check",
Name: "insecure-policy",
Usage: "run the tool without any policy check",
Destination: &opts.insecurePolicy,
},
cli.StringFlag{
Name: "registries.d",
Value: "",
Usage: "use registry configuration files in `DIR` (e.g. for container signature storage)",
Name: "registries.d",
Usage: "use registry configuration files in `DIR` (e.g. for container signature storage)",
Destination: &opts.registriesDirPath,
},
cli.StringFlag{
Name: "override-arch",
Value: "",
Usage: "use `ARCH` instead of the architecture of the machine for choosing images",
Name: "override-arch",
Usage: "use `ARCH` instead of the architecture of the machine for choosing images",
Destination: &opts.overrideArch,
},
cli.StringFlag{
Name: "override-os",
Value: "",
Usage: "use `OS` instead of the running OS for choosing images",
Name: "override-os",
Usage: "use `OS` instead of the running OS for choosing images",
Destination: &opts.overrideOS,
},
cli.DurationFlag{
Name: "command-timeout",
Usage: "timeout for the command execution",
Destination: &opts.commandTimeout,
},
}
app.Before = func(c *cli.Context) error {
if c.GlobalBool("debug") {
logrus.SetLevel(logrus.DebugLevel)
}
if c.GlobalIsSet("tls-verify") {
logrus.Warn("'--tls-verify' is deprecated, please set this on the specific subcommand")
}
return nil
}
app.Before = opts.before
app.Commands = []cli.Command{
copyCmd,
inspectCmd,
layersCmd,
deleteCmd,
manifestDigestCmd,
standaloneSignCmd,
standaloneVerifyCmd,
untrustedSignatureDumpCmd,
copyCmd(&opts),
inspectCmd(&opts),
layersCmd(&opts),
deleteCmd(&opts),
manifestDigestCmd(),
standaloneSignCmd(),
standaloneVerifyCmd(),
untrustedSignatureDumpCmd(),
}
return app
return app, &opts
}

// before is run by the cli package for any command, before running the command-specific handler.
func (opts *globalOptions) before(_ *cli.Context) error {
if opts.debug {
logrus.SetLevel(logrus.DebugLevel)
}
if opts.tlsVerify.present {
logrus.Warn("'--tls-verify' is deprecated, please set this on the specific subcommand")
}
return nil
}

func main() {
if reexec.Init() {
return
}
app := createApp()
app, _ := createApp()
if err := app.Run(os.Args); err != nil {
logrus.Fatal(err)
}
}

// getPolicyContext handles the global "policy" flag.
func getPolicyContext(c *cli.Context) (*signature.PolicyContext, error) {
policyPath := c.GlobalString("policy")
var policy *signature.Policy // This could be cached across calls, if we had an application context.
// getPolicyContext returns a *signature.PolicyContext based on opts.
func (opts *globalOptions) getPolicyContext() (*signature.PolicyContext, error) {
var policy *signature.Policy // This could be cached across calls in opts.
var err error
if c.GlobalBool("insecure-policy") {
if opts.insecurePolicy {
policy = &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
} else if policyPath == "" {
} else if opts.policyPath == "" {
policy, err = signature.DefaultPolicy(nil)
} else {
policy, err = signature.NewPolicyFromFile(policyPath)
policy, err = signature.NewPolicyFromFile(opts.policyPath)
}
if err != nil {
return nil, err
}
return signature.NewPolicyContext(policy)
}

// commandTimeoutContext returns a context.Context and a cancellation callback based on opts.
// The caller should usually "defer cancel()" immediately after calling this.
func (opts *globalOptions) commandTimeoutContext() (context.Context, context.CancelFunc) {
ctx := context.Background()
var cancel context.CancelFunc = func() {}
if opts.commandTimeout > 0 {
ctx, cancel = context.WithTimeout(ctx, opts.commandTimeout)
}
return ctx, cancel
}
```
@@ -5,7 +5,7 @@ import "bytes"
|
||||
// runSkopeo creates an app object and runs it with args, with an implied first "skopeo".
|
||||
// Returns output intended for stdout and the returned error, if any.
|
||||
func runSkopeo(args ...string) (string, error) {
|
||||
app := createApp()
|
||||
app, _ := createApp()
|
||||
stdout := bytes.Buffer{}
|
||||
app.Writer = &stdout
|
||||
args = append([]string{"skopeo"}, args...)
|
||||
|
||||
@@ -3,17 +3,31 @@ package main
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/containers/image/manifest"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func manifestDigest(context *cli.Context) error {
|
||||
if len(context.Args()) != 1 {
|
||||
type manifestDigestOptions struct {
|
||||
}
|
||||
|
||||
func manifestDigestCmd() cli.Command {
|
||||
opts := manifestDigestOptions{}
|
||||
return cli.Command{
|
||||
Name: "manifest-digest",
|
||||
Usage: "Compute a manifest digest of a file",
|
||||
ArgsUsage: "MANIFEST",
|
||||
Action: commandAction(opts.run),
|
||||
}
|
||||
}
|
||||
|
||||
func (opts *manifestDigestOptions) run(args []string, stdout io.Writer) error {
|
||||
if len(args) != 1 {
|
||||
return errors.New("Usage: skopeo manifest-digest manifest")
|
||||
}
|
||||
manifestPath := context.Args()[0]
|
||||
manifestPath := args[0]
|
||||
|
||||
man, err := ioutil.ReadFile(manifestPath)
|
||||
if err != nil {
|
||||
@@ -23,13 +37,6 @@ func manifestDigest(context *cli.Context) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error computing digest: %v", err)
|
||||
}
|
||||
fmt.Fprintf(context.App.Writer, "%s\n", digest)
|
||||
fmt.Fprintf(stdout, "%s\n", digest)
|
||||
return nil
|
||||
}
|
||||
|
||||
var manifestDigestCmd = cli.Command{
|
||||
Name: "manifest-digest",
|
||||
Usage: "Compute a manifest digest of a file",
|
||||
ArgsUsage: "MANIFEST",
|
||||
Action: manifestDigest,
|
||||
}
|
||||
|
||||
@@ -4,20 +4,41 @@ import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/containers/image/signature"
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func standaloneSign(c *cli.Context) error {
|
||||
outputFile := c.String("output")
|
||||
if len(c.Args()) != 3 || outputFile == "" {
|
||||
type standaloneSignOptions struct {
|
||||
output string // Output file path
|
||||
}
|
||||
|
||||
func standaloneSignCmd() cli.Command {
|
||||
opts := standaloneSignOptions{}
|
||||
return cli.Command{
|
||||
Name: "standalone-sign",
|
||||
Usage: "Create a signature using local files",
|
||||
ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT",
|
||||
Action: commandAction(opts.run),
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "output, o",
|
||||
Usage: "output the signature to `SIGNATURE`",
|
||||
Destination: &opts.output,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (opts *standaloneSignOptions) run(args []string, stdout io.Writer) error {
|
||||
if len(args) != 3 || opts.output == "" {
|
||||
return errors.New("Usage: skopeo standalone-sign manifest docker-reference key-fingerprint -o signature")
|
||||
}
|
||||
manifestPath := c.Args()[0]
|
||||
dockerReference := c.Args()[1]
|
||||
fingerprint := c.Args()[2]
|
||||
manifestPath := args[0]
|
||||
dockerReference := args[1]
|
||||
fingerprint := args[2]
|
||||
|
||||
manifest, err := ioutil.ReadFile(manifestPath)
|
||||
if err != nil {
|
||||
@@ -34,33 +55,33 @@ func standaloneSign(c *cli.Context) error {
|
||||
return fmt.Errorf("Error creating signature: %v", err)
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(outputFile, signature, 0644); err != nil {
|
||||
return fmt.Errorf("Error writing signature to %s: %v", outputFile, err)
|
||||
if err := ioutil.WriteFile(opts.output, signature, 0644); err != nil {
|
||||
return fmt.Errorf("Error writing signature to %s: %v", opts.output, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var standaloneSignCmd = cli.Command{
|
||||
Name: "standalone-sign",
|
||||
Usage: "Create a signature using local files",
|
||||
ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT",
|
||||
Action: standaloneSign,
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "output, o",
|
||||
Usage: "output the signature to `SIGNATURE`",
|
||||
},
|
||||
},
|
||||
type standaloneVerifyOptions struct {
|
||||
}
|
||||
|
||||
func standaloneVerify(c *cli.Context) error {
|
||||
if len(c.Args()) != 4 {
|
||||
func standaloneVerifyCmd() cli.Command {
|
||||
opts := standaloneVerifyOptions{}
|
||||
return cli.Command{
|
||||
Name: "standalone-verify",
|
||||
Usage: "Verify a signature using local files",
|
||||
ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT SIGNATURE",
|
||||
Action: commandAction(opts.run),
|
||||
}
|
||||
}
|
||||
|
||||
func (opts *standaloneVerifyOptions) run(args []string, stdout io.Writer) error {
|
||||
if len(args) != 4 {
|
||||
return errors.New("Usage: skopeo standalone-verify manifest docker-reference key-fingerprint signature")
|
||||
}
|
||||
manifestPath := c.Args()[0]
|
||||
expectedDockerReference := c.Args()[1]
|
||||
expectedFingerprint := c.Args()[2]
|
||||
signaturePath := c.Args()[3]
|
||||
manifestPath := args[0]
|
||||
expectedDockerReference := args[1]
|
||||
expectedFingerprint := args[2]
|
||||
signaturePath := args[3]
|
||||
|
||||
unverifiedManifest, err := ioutil.ReadFile(manifestPath)
|
||||
if err != nil {
|
||||
@@ -81,22 +102,35 @@ func standaloneVerify(c *cli.Context) error {
|
||||
return fmt.Errorf("Error verifying signature: %v", err)
|
||||
}
|
||||
|
||||
fmt.Fprintf(c.App.Writer, "Signature verified, digest %s\n", sig.DockerManifestDigest)
|
||||
fmt.Fprintf(stdout, "Signature verified, digest %s\n", sig.DockerManifestDigest)
|
||||
return nil
|
||||
}
|
||||
|
||||
var standaloneVerifyCmd = cli.Command{
|
||||
Name: "standalone-verify",
|
||||
Usage: "Verify a signature using local files",
|
||||
ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT SIGNATURE",
|
||||
Action: standaloneVerify,
|
||||
// WARNING: Do not use the contents of this for ANY security decisions,
|
||||
// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
|
||||
// There is NO REASON to expect the values to be correct, or not intentionally misleading
|
||||
// (including things like “✅ Verified by $authority”)
|
||||
//
|
||||
// The subcommand is undocumented, and it may be renamed or entirely disappear in the future.
|
||||
type untrustedSignatureDumpOptions struct {
|
||||
}
|
||||
|
||||
func untrustedSignatureDump(c *cli.Context) error {
|
||||
if len(c.Args()) != 1 {
|
||||
func untrustedSignatureDumpCmd() cli.Command {
|
||||
opts := untrustedSignatureDumpOptions{}
|
||||
return cli.Command{
|
||||
Name: "untrusted-signature-dump-without-verification",
|
||||
Usage: "Dump contents of a signature WITHOUT VERIFYING IT",
|
||||
ArgsUsage: "SIGNATURE",
|
||||
Hidden: true,
|
||||
Action: commandAction(opts.run),
|
||||
}
|
||||
}
|
||||
|
||||
func (opts *untrustedSignatureDumpOptions) run(args []string, stdout io.Writer) error {
|
||||
if len(args) != 1 {
|
||||
return errors.New("Usage: skopeo untrusted-signature-dump-without-verification signature")
|
||||
}
|
||||
untrustedSignaturePath := c.Args()[0]
|
||||
untrustedSignaturePath := args[0]
|
||||
|
||||
untrustedSignature, err := ioutil.ReadFile(untrustedSignaturePath)
|
||||
if err != nil {
|
||||
@@ -111,20 +145,6 @@ func untrustedSignatureDump(c *cli.Context) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(c.App.Writer, string(untrustedOut))
|
||||
fmt.Fprintln(stdout, string(untrustedOut))
|
||||
return nil
|
||||
}
|
||||
|
||||
// WARNING: Do not use the contents of this for ANY security decisions,
|
||||
// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
|
||||
// There is NO REASON to expect the values to be correct, or not intentionally misleading
|
||||
// (including things like “✅ Verified by $authority”)
|
||||
//
|
||||
// The subcommand is undocumented, and it may be renamed or entirely disappear in the future.
|
||||
var untrustedSignatureDumpCmd = cli.Command{
|
||||
Name: "untrusted-signature-dump-without-verification",
|
||||
Usage: "Dump contents of a signature WITHOUT VERIFYING IT",
|
||||
ArgsUsage: "SIGNATURE",
|
||||
Hidden: true,
|
||||
Action: untrustedSignatureDump,
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
"github.com/containers/image/transports/alltransports"
|
||||
@@ -10,29 +11,125 @@ import (
|
||||
"github.com/urfave/cli"
|
||||
)
|
||||
|
||||
func contextFromGlobalOptions(c *cli.Context, flagPrefix string) (*types.SystemContext, error) {
|
||||
// errorShouldDisplayUsage is a subtype of error used by command handlers to indicate that cli.ShowSubcommandHelp should be called.
|
||||
type errorShouldDisplayUsage struct {
|
||||
error
|
||||
}
|
||||
|
||||
// commandAction intermediates between the cli.ActionFunc interface and the real handler,
|
||||
// primarily to ensure that cli.Context is not available to the handler, which in turn
|
||||
// makes sure that the cli.String() etc. flag access functions are not used,
|
||||
// and everything is done using the *Options structures and the Destination: members of cli.Flag.
|
||||
// handler may return errorShouldDisplayUsage to cause cli.ShowSubcommandHelp to be called.
|
||||
func commandAction(handler func(args []string, stdout io.Writer) error) cli.ActionFunc {
|
||||
return func(c *cli.Context) error {
|
||||
err := handler(([]string)(c.Args()), c.App.Writer)
|
||||
if _, ok := err.(errorShouldDisplayUsage); ok {
|
||||
cli.ShowSubcommandHelp(c)
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// sharedImageOptions collects CLI flags which are image-related, but do not change across images.
|
||||
// This really should be a part of globalOptions, but that would break existing users of (skopeo copy --authfile=).
|
||||
type sharedImageOptions struct {
|
||||
authFilePath string // Path to a */containers/auth.json
|
||||
}
|
||||
|
||||
// sharedImageFlags prepares a collection of CLI flags writing into sharedImageOptions, and the managed sharedImageOptions structure.
|
||||
func sharedImageFlags() ([]cli.Flag, *sharedImageOptions) {
|
||||
opts := sharedImageOptions{}
|
||||
return []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: "authfile",
|
||||
Usage: "path of the authentication file. Default is ${XDG_RUNTIME_DIR}/containers/auth.json",
|
||||
Destination: &opts.authFilePath,
|
||||
},
|
||||
}, &opts
|
||||
}
|
||||
|
||||
// imageOptions collects CLI flags which are the same across subcommands, but may be different for each image
|
||||
// (e.g. may differ between the source and destination of a copy)
|
||||
type imageOptions struct {
|
||||
global *globalOptions // May be shared across several imageOptions instances.
|
||||
shared *sharedImageOptions // May be shared across several imageOptions instances.
|
||||
credsOption optionalString // username[:password] for accessing a registry
|
||||
dockerCertPath string // A directory using Docker-like *.{crt,cert,key} files for connecting to a registry or a daemon
|
||||
tlsVerify optionalBool // Require HTTPS and verify certificates (for docker: and docker-daemon:)
|
||||
sharedBlobDir string // A directory to use for OCI blobs, shared across repositories
|
||||
dockerDaemonHost string // docker-daemon: host to connect to
|
||||
}
|
||||
|
||||
// imageFlags prepares a collection of CLI flags writing into imageOptions, and the managed imageOptions structure.
|
||||
func imageFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) ([]cli.Flag, *imageOptions) {
|
||||
opts := imageOptions{
|
||||
global: global,
|
||||
shared: shared,
|
||||
}
|
||||
|
||||
// This is horribly ugly, but we need to support the old option forms of (skopeo copy) for compatibility.
|
||||
// Don't add any more cases like this.
|
||||
credsOptionExtra := ""
|
||||
if credsOptionAlias != "" {
|
||||
credsOptionExtra += "," + credsOptionAlias
|
||||
}
|
||||
|
||||
return []cli.Flag{
|
||||
cli.GenericFlag{
|
||||
Name: flagPrefix + "creds" + credsOptionExtra,
|
||||
Usage: "Use `USERNAME[:PASSWORD]` for accessing the registry",
|
||||
Value: newOptionalStringValue(&opts.credsOption),
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: flagPrefix + "cert-dir",
|
||||
Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry or daemon",
|
||||
Destination: &opts.dockerCertPath,
|
||||
},
|
||||
cli.GenericFlag{
|
||||
Name: flagPrefix + "tls-verify",
|
||||
Usage: "require HTTPS and verify certificates when talking to the container registry or daemon (defaults to true)",
|
||||
Value: newOptionalBoolValue(&opts.tlsVerify),
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: flagPrefix + "shared-blob-dir",
|
||||
Usage: "`DIRECTORY` to use to share blobs across OCI repositories",
|
||||
Destination: &opts.sharedBlobDir,
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: flagPrefix + "daemon-host",
|
||||
Usage: "use docker daemon host at `HOST` (docker-daemon: only)",
|
||||
Destination: &opts.dockerDaemonHost,
|
||||
},
|
||||
}, &opts
|
||||
}
|
||||
|
||||
// newSystemContext returns a *types.SystemContext corresponding to opts.
|
||||
// It is guaranteed to return a fresh instance, so it is safe to make additional updates to it.
|
||||
func (opts *imageOptions) newSystemContext() (*types.SystemContext, error) {
|
||||
ctx := &types.SystemContext{
|
||||
RegistriesDirPath: c.GlobalString("registries.d"),
|
||||
ArchitectureChoice: c.GlobalString("override-arch"),
|
||||
OSChoice: c.GlobalString("override-os"),
|
||||
DockerCertPath: c.String(flagPrefix + "cert-dir"),
|
||||
// DEPRECATED: keep this here for backward compatibility, but override
|
||||
// them if per subcommand flags are provided (see below).
|
||||
DockerInsecureSkipTLSVerify: !c.GlobalBoolT("tls-verify"),
|
||||
OSTreeTmpDirPath: c.String(flagPrefix + "ostree-tmp-dir"),
|
||||
OCISharedBlobDirPath: c.String(flagPrefix + "shared-blob-dir"),
|
||||
DirForceCompress: c.Bool(flagPrefix + "compress"),
|
||||
AuthFilePath: c.String("authfile"),
|
||||
DockerDaemonHost: c.String(flagPrefix + "daemon-host"),
|
||||
DockerDaemonCertPath: c.String(flagPrefix + "cert-dir"),
|
||||
DockerDaemonInsecureSkipTLSVerify: !c.BoolT(flagPrefix + "tls-verify"),
|
||||
RegistriesDirPath: opts.global.registriesDirPath,
|
||||
ArchitectureChoice: opts.global.overrideArch,
|
||||
OSChoice: opts.global.overrideOS,
|
||||
DockerCertPath: opts.dockerCertPath,
|
||||
OCISharedBlobDirPath: opts.sharedBlobDir,
|
||||
AuthFilePath: opts.shared.authFilePath,
|
||||
DockerDaemonHost: opts.dockerDaemonHost,
|
||||
DockerDaemonCertPath: opts.dockerCertPath,
|
||||
}
|
||||
if c.IsSet(flagPrefix + "tls-verify") {
|
||||
ctx.DockerInsecureSkipTLSVerify = !c.BoolT(flagPrefix + "tls-verify")
|
||||
if opts.tlsVerify.present {
|
||||
ctx.DockerDaemonInsecureSkipTLSVerify = !opts.tlsVerify.value
|
||||
}
|
||||
if c.IsSet(flagPrefix + "creds") {
|
||||
// DEPRECATED: We support this for backward compatibility, but override it if a per-image flag is provided.
|
||||
if opts.global.tlsVerify.present {
|
||||
ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.global.tlsVerify.value)
|
||||
}
|
||||
if opts.tlsVerify.present {
|
||||
ctx.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!opts.tlsVerify.value)
|
||||
}
|
||||
if opts.credsOption.present {
|
||||
var err error
|
||||
ctx.DockerAuthConfig, err = getDockerAuth(c.String(flagPrefix + "creds"))
|
||||
ctx.DockerAuthConfig, err = getDockerAuth(opts.credsOption.value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -40,6 +137,45 @@ func contextFromGlobalOptions(c *cli.Context, flagPrefix string) (*types.SystemC
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
// imageDestOptions is a superset of imageOptions specialized for image destinations.
|
||||
type imageDestOptions struct {
|
||||
*imageOptions
|
||||
osTreeTmpDir string // A directory to use for OSTree temporary files
|
||||
dirForceCompression bool // Compress layers when saving to the dir: transport
|
||||
}
|
||||
|
||||
// imageDestFlags prepares a collection of CLI flags writing into imageDestOptions, and the managed imageDestOptions structure.
|
||||
func imageDestFlags(global *globalOptions, shared *sharedImageOptions, flagPrefix, credsOptionAlias string) ([]cli.Flag, *imageDestOptions) {
|
||||
genericFlags, genericOptions := imageFlags(global, shared, flagPrefix, credsOptionAlias)
|
||||
opts := imageDestOptions{imageOptions: genericOptions}
|
||||
|
||||
return append(genericFlags, []cli.Flag{
|
||||
cli.StringFlag{
|
||||
Name: flagPrefix + "ostree-tmp-dir",
|
||||
Usage: "`DIRECTORY` to use for OSTree temporary files",
|
||||
Destination: &opts.osTreeTmpDir,
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: flagPrefix + "compress",
|
||||
Usage: "Compress tarball image layers when saving to directory using the 'dir' transport. (default is same compression type as source)",
|
||||
Destination: &opts.dirForceCompression,
|
||||
},
|
||||
}...), &opts
|
||||
}
|
||||
|
||||
// newSystemContext returns a *types.SystemContext corresponding to opts.
|
||||
// It is guaranteed to return a fresh instance, so it is safe to make additional updates to it.
|
||||
func (opts *imageDestOptions) newSystemContext() (*types.SystemContext, error) {
|
||||
ctx, err := opts.imageOptions.newSystemContext()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ctx.OSTreeTmpDirPath = opts.osTreeTmpDir
|
||||
ctx.DirForceCompress = opts.dirForceCompression
|
||||
return ctx, err
|
||||
}
|
||||
|
||||
func parseCreds(creds string) (string, string, error) {
|
||||
if creds == "" {
|
||||
return "", "", errors.New("credentials can't be empty")
|
||||
@@ -67,13 +203,12 @@ func getDockerAuth(creds string) (*types.DockerAuthConfig, error) {
|
||||
|
||||
// parseImage converts image URL-like string to an initialized handler for that image.
|
||||
// The caller must call .Close() on the returned ImageCloser.
|
||||
func parseImage(ctx context.Context, c *cli.Context) (types.ImageCloser, error) {
|
||||
imgName := c.Args().First()
|
||||
ref, err := alltransports.ParseImageName(imgName)
|
||||
func parseImage(ctx context.Context, opts *imageOptions, name string) (types.ImageCloser, error) {
|
||||
ref, err := alltransports.ParseImageName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sys, err := contextFromGlobalOptions(c, "")
|
||||
sys, err := opts.newSystemContext()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -82,12 +217,12 @@ func parseImage(ctx context.Context, c *cli.Context) (types.ImageCloser, error)
|
||||
|
||||
// parseImageSource converts image URL-like string to an ImageSource.
|
||||
// The caller must call .Close() on the returned ImageSource.
|
||||
func parseImageSource(ctx context.Context, c *cli.Context, name string) (types.ImageSource, error) {
|
||||
func parseImageSource(ctx context.Context, opts *imageOptions, name string) (types.ImageSource, error) {
|
||||
ref, err := alltransports.ParseImageName(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sys, err := contextFromGlobalOptions(c, "")
|
||||
sys, err := opts.newSystemContext()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
184
cmd/skopeo/utils_test.go
Normal file
@@ -0,0 +1,184 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"testing"
|
||||
|
||||
"github.com/containers/image/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// fakeGlobalOptions creates globalOptions and sets it according to flags.
|
||||
// NOTE: This is QUITE FAKE; none of the urfave/cli normalization and the like happens.
|
||||
func fakeGlobalOptions(t *testing.T, flags []string) *globalOptions {
|
||||
app, opts := createApp()
|
||||
|
||||
flagSet := flag.NewFlagSet(app.Name, flag.ContinueOnError)
|
||||
for _, f := range app.Flags {
|
||||
f.Apply(flagSet)
|
||||
}
|
||||
err := flagSet.Parse(flags)
|
||||
require.NoError(t, err)
|
||||
|
||||
return opts
|
||||
}
|
||||
|
||||
// fakeImageOptions creates imageOptions and sets it according to globalFlags/cmdFlags.
|
||||
// NOTE: This is QUITE FAKE; none of the urfave/cli normalization and the like happens.
|
||||
func fakeImageOptions(t *testing.T, flagPrefix string, globalFlags []string, cmdFlags []string) *imageOptions {
|
||||
globalOpts := fakeGlobalOptions(t, globalFlags)
|
||||
|
||||
sharedFlags, sharedOpts := sharedImageFlags()
|
||||
imageFlags, imageOpts := imageFlags(globalOpts, sharedOpts, flagPrefix, "")
|
||||
flagSet := flag.NewFlagSet("fakeImageOptions", flag.ContinueOnError)
|
||||
for _, f := range append(sharedFlags, imageFlags...) {
|
||||
f.Apply(flagSet)
|
||||
}
|
||||
err := flagSet.Parse(cmdFlags)
|
||||
require.NoError(t, err)
|
||||
return imageOpts
|
||||
}
|
||||
|
||||
func TestImageOptionsNewSystemContext(t *testing.T) {
|
||||
// Default state
|
||||
opts := fakeImageOptions(t, "dest-", []string{}, []string{})
|
||||
res, err := opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{}, res)
|
||||
|
||||
// Set everything to non-default values.
|
||||
opts = fakeImageOptions(t, "dest-", []string{
|
||||
"--registries.d", "/srv/registries.d",
|
||||
"--override-arch", "overridden-arch",
|
||||
"--override-os", "overridden-os",
|
||||
}, []string{
|
||||
"--authfile", "/srv/authfile",
|
||||
"--dest-cert-dir", "/srv/cert-dir",
|
||||
"--dest-shared-blob-dir", "/srv/shared-blob-dir",
|
||||
"--dest-daemon-host", "daemon-host.example.com",
|
||||
"--dest-tls-verify=false",
|
||||
"--dest-creds", "creds-user:creds-password",
|
||||
})
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
RegistriesDirPath: "/srv/registries.d",
|
||||
AuthFilePath: "/srv/authfile",
|
||||
ArchitectureChoice: "overridden-arch",
|
||||
OSChoice: "overridden-os",
|
||||
OCISharedBlobDirPath: "/srv/shared-blob-dir",
|
||||
DockerCertPath: "/srv/cert-dir",
|
||||
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
|
||||
DockerAuthConfig: &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
|
||||
DockerDaemonCertPath: "/srv/cert-dir",
|
||||
DockerDaemonHost: "daemon-host.example.com",
|
||||
DockerDaemonInsecureSkipTLSVerify: true,
|
||||
}, res)
|
||||
|
||||
// Global/per-command tlsVerify behavior
|
||||
for _, c := range []struct {
|
||||
global, cmd string
|
||||
expectedDocker types.OptionalBool
|
||||
expectedDockerDaemon bool
|
||||
}{
|
||||
{"", "", types.OptionalBoolUndefined, false},
|
||||
{"", "false", types.OptionalBoolTrue, true},
|
||||
{"", "true", types.OptionalBoolFalse, false},
|
||||
{"false", "", types.OptionalBoolTrue, false},
|
||||
{"false", "false", types.OptionalBoolTrue, true},
|
||||
{"false", "true", types.OptionalBoolFalse, false},
|
||||
{"true", "", types.OptionalBoolFalse, false},
|
||||
{"true", "false", types.OptionalBoolTrue, true},
|
||||
{"true", "true", types.OptionalBoolFalse, false},
|
||||
} {
|
||||
globalFlags := []string{}
|
||||
if c.global != "" {
|
||||
globalFlags = append(globalFlags, "--tls-verify="+c.global)
|
||||
}
|
||||
cmdFlags := []string{}
|
||||
if c.cmd != "" {
|
||||
cmdFlags = append(cmdFlags, "--dest-tls-verify="+c.cmd)
|
||||
}
|
||||
opts := fakeImageOptions(t, "dest-", globalFlags, cmdFlags)
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, c.expectedDocker, res.DockerInsecureSkipTLSVerify, "%#v", c)
|
||||
assert.Equal(t, c.expectedDockerDaemon, res.DockerDaemonInsecureSkipTLSVerify, "%#v", c)
|
||||
}
|
||||
|
||||
// Invalid option values
|
||||
opts = fakeImageOptions(t, "dest-", []string{}, []string{"--dest-creds", ""})
|
||||
_, err = opts.newSystemContext()
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
// fakeImageDestOptions creates imageDestOptions and sets it according to globalFlags/cmdFlags.
|
||||
// NOTE: This is QUITE FAKE; none of the urfave/cli normalization and the like happens.
|
||||
func fakeImageDestOptions(t *testing.T, flagPrefix string, globalFlags []string, cmdFlags []string) *imageDestOptions {
|
||||
globalOpts := fakeGlobalOptions(t, globalFlags)
|
||||
|
||||
sharedFlags, sharedOpts := sharedImageFlags()
|
||||
imageFlags, imageOpts := imageDestFlags(globalOpts, sharedOpts, flagPrefix, "")
|
||||
flagSet := flag.NewFlagSet("fakeImageDestOptions", flag.ContinueOnError)
|
||||
for _, f := range append(sharedFlags, imageFlags...) {
|
||||
f.Apply(flagSet)
|
||||
}
|
||||
err := flagSet.Parse(cmdFlags)
|
||||
require.NoError(t, err)
|
||||
return imageOpts
|
||||
}
|
||||
|
||||
func TestImageDestOptionsNewSystemContext(t *testing.T) {
|
||||
// Default state
|
||||
opts := fakeImageDestOptions(t, "dest-", []string{}, []string{})
|
||||
res, err := opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{}, res)
|
||||
|
||||
// Explicitly set everything to default, except for when the default is “not present”
|
||||
opts = fakeImageDestOptions(t, "dest-", []string{}, []string{
|
||||
"--dest-compress=false",
|
||||
})
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{}, res)
|
||||
|
||||
// Set everything to non-default values.
|
||||
opts = fakeImageDestOptions(t, "dest-", []string{
|
||||
"--registries.d", "/srv/registries.d",
|
||||
"--override-arch", "overridden-arch",
|
||||
"--override-os", "overridden-os",
|
||||
}, []string{
|
||||
"--authfile", "/srv/authfile",
|
||||
"--dest-cert-dir", "/srv/cert-dir",
|
||||
"--dest-ostree-tmp-dir", "/srv/ostree-tmp-dir",
|
||||
"--dest-shared-blob-dir", "/srv/shared-blob-dir",
|
||||
"--dest-compress=true",
|
||||
"--dest-daemon-host", "daemon-host.example.com",
|
||||
"--dest-tls-verify=false",
|
||||
"--dest-creds", "creds-user:creds-password",
|
||||
})
|
||||
res, err = opts.newSystemContext()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, &types.SystemContext{
|
||||
RegistriesDirPath: "/srv/registries.d",
|
||||
AuthFilePath: "/srv/authfile",
|
||||
ArchitectureChoice: "overridden-arch",
|
||||
OSChoice: "overridden-os",
|
||||
OCISharedBlobDirPath: "/srv/shared-blob-dir",
|
||||
DockerCertPath: "/srv/cert-dir",
|
||||
DockerInsecureSkipTLSVerify: types.OptionalBoolTrue,
|
||||
DockerAuthConfig: &types.DockerAuthConfig{Username: "creds-user", Password: "creds-password"},
|
||||
OSTreeTmpDirPath: "/srv/ostree-tmp-dir",
|
||||
DockerDaemonCertPath: "/srv/cert-dir",
|
||||
DockerDaemonHost: "daemon-host.example.com",
|
||||
DockerDaemonInsecureSkipTLSVerify: true,
|
||||
DirForceCompress: true,
|
||||
}, res)
|
||||
|
||||
// Invalid option values in imageOptions
|
||||
opts = fakeImageDestOptions(t, "dest-", []string{}, []string{"--dest-creds", ""})
|
||||
_, err = opts.newSystemContext()
|
||||
assert.Error(t, err)
|
||||
}
|
||||
@@ -110,6 +110,7 @@ _skopeo_skopeo() {
|
||||
--registries.d
|
||||
--override-arch
|
||||
--override-os
|
||||
--command-timeout
|
||||
"
|
||||
local boolean_options="
|
||||
--insecure-policy
|
||||
|
||||
@@ -59,6 +59,8 @@ Most commands refer to container images, using a _transport_`:`_details_ format.
|
||||
|
||||
**--override-os** _OS_ Use _OS_ instead of the running OS for choosing images.
|
||||
|
||||
**--command-timeout** _duration_ Timeout for the command execution.
|
||||
|
||||
**--help**|**-h** Show help
|
||||
|
||||
**--version**|**-v** print the version number
|
||||
|
||||
@@ -6,7 +6,7 @@ set -e
|
||||
#
|
||||
# Requirements:
|
||||
# - The current directory should be a checkout of the skopeo source code
|
||||
# (https://github.com/projectatomic/skopeo). Whatever version is checked out
|
||||
# (https://github.com/containers/skopeo). Whatever version is checked out
|
||||
# will be built.
|
||||
# - The script is intended to be run inside the docker container specified
|
||||
# in the Dockerfile at the root of the source. In other words:
|
||||
@@ -19,7 +19,7 @@ set -e
|
||||
|
||||
set -o pipefail
|
||||
|
||||
export SKOPEO_PKG='github.com/projectatomic/skopeo'
|
||||
export SKOPEO_PKG='github.com/containers/skopeo'
|
||||
export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
export MAKEDIR="$SCRIPTDIR/make"
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ if [ -z "$VALIDATE_UPSTREAM" ]; then
|
||||
# this is kind of an expensive check, so let's not do this twice if we
|
||||
# are running more than one validate bundlescript
|
||||
|
||||
VALIDATE_REPO='https://github.com/projectatomic/skopeo.git'
|
||||
VALIDATE_REPO='https://github.com/containers/skopeo.git'
|
||||
VALIDATE_BRANCH='master'
|
||||
|
||||
if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then
|
||||
|
||||
@@ -4,13 +4,14 @@ set -e
|
||||
export GOPATH=$(pwd)/_gopath
|
||||
export PATH=$GOPATH/bin:$PATH
|
||||
|
||||
_projectatomic="${GOPATH}/src/github.com/projectatomic"
|
||||
mkdir -vp ${_projectatomic}
|
||||
ln -vsf $(pwd) ${_projectatomic}/skopeo
|
||||
_containers="${GOPATH}/src/github.com/containers"
|
||||
mkdir -vp ${_containers}
|
||||
ln -vsf $(pwd) ${_containers}/skopeo
|
||||
|
||||
go get -u github.com/cpuguy83/go-md2man github.com/golang/lint/golint
|
||||
go version
|
||||
go get -u github.com/cpuguy83/go-md2man golang.org/x/lint/golint
|
||||
|
||||
cd ${_projectatomic}/skopeo
|
||||
cd ${_containers}/skopeo
|
||||
make validate-local test-unit-local binary-local
|
||||
sudo make install
|
||||
skopeo -v
|
||||
|
||||
@@ -5,8 +5,8 @@ import (
|
||||
"os/exec"
|
||||
"testing"
|
||||
|
||||
"github.com/containers/skopeo/version"
|
||||
"github.com/go-check/check"
|
||||
"github.com/projectatomic/skopeo/version"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
21
vendor.conf
@@ -1,9 +1,11 @@
|
||||
github.com/urfave/cli v1.17.0
|
||||
github.com/urfave/cli v1.20.0
|
||||
github.com/kr/pretty v0.1.0
|
||||
github.com/kr/text v0.1.0
|
||||
github.com/containers/image master
|
||||
github.com/opencontainers/go-digest master
|
||||
gopkg.in/cheggaaa/pb.v1 ad4efe000aa550bb54918c06ebbadc0ff17687b9 https://github.com/cheggaaa/pb
|
||||
github.com/containers/image f0cbc16b444d729362c78244c715b45bf4019b71
|
||||
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
|
||||
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.27
|
||||
github.com/mattn/go-runewidth 14207d285c6c197daabb5c9793d63e7af9ab2d50
|
||||
github.com/containers/storage master
|
||||
github.com/sirupsen/logrus v1.0.0
|
||||
github.com/go-check/check v1
|
||||
@@ -13,6 +15,7 @@ github.com/pmezard/go-difflib master
|
||||
github.com/pkg/errors master
|
||||
golang.org/x/crypto master
|
||||
github.com/ulikunitz/xz v0.5.4
|
||||
github.com/boltdb/bolt master
|
||||
# docker deps from https://github.com/docker/docker/blob/v1.11.2/hack/vendor.sh
|
||||
github.com/docker/docker da99009bbb1165d1ac5688b5c81d2f589d418341
|
||||
github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6
|
||||
@@ -39,7 +42,7 @@ github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3
|
||||
github.com/docker/libtrust master
|
||||
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
|
||||
github.com/opencontainers/runc master
|
||||
github.com/opencontainers/image-spec 149252121d044fddff670adcdc67f33148e16226
|
||||
github.com/opencontainers/image-spec 7b1e489870acb042978a3935d2fb76f8a79aff81
|
||||
# -- start OCI image validation requirements.
|
||||
github.com/opencontainers/runtime-spec v1.0.0
|
||||
github.com/opencontainers/image-tools 6d941547fa1df31900990b3fb47ec2468c9c6469
|
||||
@@ -47,11 +50,10 @@ github.com/xeipuuv/gojsonschema master
|
||||
github.com/xeipuuv/gojsonreference master
|
||||
github.com/xeipuuv/gojsonpointer master
|
||||
go4.org master https://github.com/camlistore/go4
|
||||
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
|
||||
github.com/ostreedev/ostree-go 56f3a639dbc0f2f5051c6d52dade28a882ba78ce
|
||||
# -- end OCI image validation requirements
|
||||
github.com/mtrmac/gpgme master
|
||||
# openshift/origin's k8s dependencies as of OpenShift v1.1.5
|
||||
github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
|
||||
k8s.io/client-go master
|
||||
github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee
|
||||
gopkg.in/yaml.v2 d466437aa4adc35830964cffc5b5f262c63ddcb4
|
||||
@@ -60,8 +62,11 @@ github.com/imdario/mergo 6633656539c1639d9d78127b7d47c622b5d7b6dc
|
||||
github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
|
||||
github.com/pborman/uuid v1.0
|
||||
github.com/opencontainers/selinux master
|
||||
golang.org/x/sys master
|
||||
golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08
|
||||
github.com/tchap/go-patricia v2.2.6
|
||||
github.com/BurntSushi/toml master
|
||||
github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac
|
||||
github.com/syndtr/gocapability master
|
||||
github.com/klauspost/pgzip v1.2.1
|
||||
github.com/klauspost/compress v1.4.1
|
||||
github.com/klauspost/cpuid v1.2.0
|
||||
|
||||
20
vendor/github.com/boltdb/bolt/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 Ben Johnson
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
935
vendor/github.com/boltdb/bolt/README.md
generated
vendored
Normal file
@@ -0,0 +1,935 @@
|
||||
Bolt [](https://coveralls.io/r/boltdb/bolt?branch=master) [](https://godoc.org/github.com/boltdb/bolt) 
|
||||
====
|
||||
|
||||
Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
|
||||
[LMDB project][lmdb]. The goal of the project is to provide a simple,
|
||||
fast, and reliable database for projects that don't require a full database
|
||||
server such as Postgres or MySQL.
|
||||
|
||||
Since Bolt is meant to be used as such a low-level piece of functionality,
|
||||
simplicity is key. The API will be small and only focus on getting values
|
||||
and setting values. That's it.
|
||||
|
||||
[hyc_symas]: https://twitter.com/hyc_symas
|
||||
[lmdb]: http://symas.com/mdb/
|
||||
|
||||
## Project Status
|
||||
|
||||
Bolt is stable, the API is fixed, and the file format is fixed. Full unit
|
||||
test coverage and randomized black box testing are used to ensure database
|
||||
consistency and thread safety. Bolt is currently used in high-load production
|
||||
environments serving databases as large as 1TB. Many companies such as
|
||||
Shopify and Heroku use Bolt-backed services every day.
|
||||
|
||||
## A message from the author
|
||||
|
||||
> The original goal of Bolt was to provide a simple pure Go key/value store and to
|
||||
> not bloat the code with extraneous features. To that end, the project has been
|
||||
> a success. However, this limited scope also means that the project is complete.
|
||||
>
|
||||
> Maintaining an open source database requires an immense amount of time and energy.
|
||||
> Changes to the code can have unintended and sometimes catastrophic effects so
|
||||
> even simple changes require hours and hours of careful testing and validation.
|
||||
>
|
||||
> Unfortunately I no longer have the time or energy to continue this work. Bolt is
|
||||
> in a stable state and has years of successful production use. As such, I feel that
|
||||
> leaving it in its current state is the most prudent course of action.
|
||||
>
|
||||
> If you are interested in using a more featureful version of Bolt, I suggest that
|
||||
> you look at the CoreOS fork called [bbolt](https://github.com/coreos/bbolt).
|
||||
|
||||
- Ben Johnson ([@benbjohnson](https://twitter.com/benbjohnson))
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Getting Started](#getting-started)
|
||||
- [Installing](#installing)
|
||||
- [Opening a database](#opening-a-database)
|
||||
- [Transactions](#transactions)
|
||||
- [Read-write transactions](#read-write-transactions)
|
||||
- [Read-only transactions](#read-only-transactions)
|
||||
- [Batch read-write transactions](#batch-read-write-transactions)
|
||||
- [Managing transactions manually](#managing-transactions-manually)
|
||||
- [Using buckets](#using-buckets)
|
||||
- [Using key/value pairs](#using-keyvalue-pairs)
|
||||
- [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
|
||||
- [Iterating over keys](#iterating-over-keys)
|
||||
- [Prefix scans](#prefix-scans)
|
||||
- [Range scans](#range-scans)
|
||||
- [ForEach()](#foreach)
|
||||
- [Nested buckets](#nested-buckets)
|
||||
- [Database backups](#database-backups)
|
||||
- [Statistics](#statistics)
|
||||
- [Read-Only Mode](#read-only-mode)
|
||||
- [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
|
||||
- [Resources](#resources)
|
||||
- [Comparison with other databases](#comparison-with-other-databases)
|
||||
- [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
|
||||
- [LevelDB, RocksDB](#leveldb-rocksdb)
|
||||
- [LMDB](#lmdb)
|
||||
- [Caveats & Limitations](#caveats--limitations)
|
||||
- [Reading the Source](#reading-the-source)
|
||||
- [Other Projects Using Bolt](#other-projects-using-bolt)
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Installing
|
||||
|
||||
To start using Bolt, install Go and run `go get`:
|
||||
|
||||
```sh
|
||||
$ go get github.com/boltdb/bolt/...
|
||||
```
|
||||
|
||||
This will retrieve the library and install the `bolt` command line utility into
|
||||
your `$GOBIN` path.
|
||||
|
||||
|
||||
### Opening a database
|
||||
|
||||
The top-level object in Bolt is a `DB`. It is represented as a single file on
|
||||
your disk and represents a consistent snapshot of your data.
|
||||
|
||||
To open your database, simply use the `bolt.Open()` function:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Open the my.db data file in your current directory.
|
||||
// It will be created if it doesn't exist.
|
||||
db, err := bolt.Open("my.db", 0600, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
Please note that Bolt obtains a file lock on the data file so multiple processes
|
||||
cannot open the same database at the same time. Opening an already open Bolt
|
||||
database will cause it to hang until the other process closes it. To prevent
|
||||
an indefinite wait you can pass a timeout option to the `Open()` function:
|
||||
|
||||
```go
|
||||
db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
|
||||
```
|
||||
|
||||
|
||||
### Transactions
|
||||
|
||||
Bolt allows only one read-write transaction at a time but allows as many
|
||||
read-only transactions as you want at a time. Each transaction has a consistent
|
||||
view of the data as it existed when the transaction started.
|
||||
|
||||
Individual transactions and all objects created from them (e.g. buckets, keys)
|
||||
are not thread safe. To work with data in multiple goroutines you must start
|
||||
a transaction for each one or use locking to ensure only one goroutine accesses
|
||||
a transaction at a time. Creating a transaction from the `DB` is thread safe.
|
||||
|
||||
Read-only transactions and read-write transactions should not depend on one
|
||||
another and generally shouldn't be opened simultaneously in the same goroutine.
|
||||
This can cause a deadlock as the read-write transaction needs to periodically
|
||||
re-map the data file but it cannot do so while a read-only transaction is open.
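
As noted above, the way to read from multiple goroutines is to give each one its own transaction. A minimal sketch of that pattern (the bucket and key names are illustrative, and the bucket is assumed to exist):

```go
var wg sync.WaitGroup
for i := 0; i < 4; i++ {
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Each goroutine opens and closes its own read-only transaction.
		db.View(func(tx *bolt.Tx) error {
			_ = tx.Bucket([]byte("MyBucket")).Get([]byte("answer"))
			return nil
		})
	}()
}
wg.Wait()
```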
|
||||
|
||||
|
||||
#### Read-write transactions
|
||||
|
||||
To start a read-write transaction, you can use the `DB.Update()` function:
|
||||
|
||||
```go
|
||||
err := db.Update(func(tx *bolt.Tx) error {
|
||||
...
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
Inside the closure, you have a consistent view of the database. You commit the
|
||||
transaction by returning `nil` at the end. You can also rollback the transaction
|
||||
at any point by returning an error. All database operations are allowed inside
|
||||
a read-write transaction.
|
||||
|
||||
Always check the return error as it will report any disk failures that can cause
|
||||
your transaction to not complete. If you return an error within your closure
|
||||
it will be passed through.
|
||||
|
||||
|
||||
#### Read-only transactions
|
||||
|
||||
To start a read-only transaction, you can use the `DB.View()` function:
|
||||
|
||||
```go
|
||||
err := db.View(func(tx *bolt.Tx) error {
|
||||
...
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
You also get a consistent view of the database within this closure, however,
|
||||
no mutating operations are allowed within a read-only transaction. You can only
|
||||
retrieve buckets, retrieve values, and copy the database within a read-only
|
||||
transaction.
|
||||
|
||||
|
||||
#### Batch read-write transactions
|
||||
|
||||
Each `DB.Update()` waits for disk to commit the writes. This overhead
|
||||
can be minimized by combining multiple updates with the `DB.Batch()`
|
||||
function:
|
||||
|
||||
```go
|
||||
err := db.Batch(func(tx *bolt.Tx) error {
|
||||
...
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
Concurrent Batch calls are opportunistically combined into larger
|
||||
transactions. Batch is only useful when there are multiple goroutines
|
||||
calling it.
|
||||
|
||||
The trade-off is that `Batch` can call the given
|
||||
function multiple times, if parts of the transaction fail. The
|
||||
function must be idempotent and side effects must take effect only
|
||||
after a successful return from `DB.Batch()`.
|
||||
|
||||
For example: don't display messages from inside the function, instead
|
||||
set variables in the enclosing scope:
|
||||
|
||||
```go
|
||||
var id uint64
|
||||
err := db.Batch(func(tx *bolt.Tx) error {
|
||||
// Find last key in bucket, decode as bigendian uint64, increment
|
||||
// by one, encode back to []byte, and add new key.
|
||||
...
|
||||
id = newValue
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return ...
|
||||
}
|
||||
fmt.Printf("Allocated ID %d\n", id)
|
||||
```
|
||||
|
||||
|
||||
#### Managing transactions manually
|
||||
|
||||
The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
|
||||
function. These helper functions will start the transaction, execute a function,
|
||||
and then safely close your transaction if an error is returned. This is the
|
||||
recommended way to use Bolt transactions.
|
||||
|
||||
However, sometimes you may want to manually start and end your transactions.
|
||||
You can use the `DB.Begin()` function directly but **please** be sure to close
|
||||
the transaction.
|
||||
|
||||
```go
|
||||
// Start a writable transaction.
|
||||
tx, err := db.Begin(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
// Use the transaction...
|
||||
_, err := tx.CreateBucket([]byte("MyBucket"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Commit the transaction and check for error.
|
||||
if err := tx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
The first argument to `DB.Begin()` is a boolean stating if the transaction
|
||||
should be writable.
|
||||
|
||||
|
||||
### Using buckets
|
||||
|
||||
Buckets are collections of key/value pairs within the database. All keys in a
|
||||
bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
|
||||
function:
|
||||
|
||||
```go
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("MyBucket"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("create bucket: %s", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
You can also create a bucket only if it doesn't exist by using the
|
||||
`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
|
||||
function for all your top-level buckets after you open your database so you can
|
||||
guarantee that they exist for future transactions.
|
||||
|
||||
To delete a bucket, simply call the `Tx.DeleteBucket()` function.
|
||||
|
||||
|
||||
### Using key/value pairs
|
||||
|
||||
To save a key/value pair to a bucket, use the `Bucket.Put()` function:
|
||||
|
||||
```go
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("MyBucket"))
|
||||
err := b.Put([]byte("answer"), []byte("42"))
|
||||
return err
|
||||
})
|
||||
```
|
||||
|
||||
This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
|
||||
bucket. To retrieve this value, we can use the `Bucket.Get()` function:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("MyBucket"))
|
||||
v := b.Get([]byte("answer"))
|
||||
fmt.Printf("The answer is: %s\n", v)
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
The `Get()` function does not return an error because its operation is
|
||||
guaranteed to work (unless there is some kind of system failure). If the key
|
||||
exists then it will return its byte slice value. If it doesn't exist then it
|
||||
will return `nil`. It's important to note that you can have a zero-length value
|
||||
set to a key which is different than the key not existing.
|
||||
|
||||
Use the `Bucket.Delete()` function to delete a key from the bucket.
|
||||
|
||||
Please note that values returned from `Get()` are only valid while the
|
||||
transaction is open. If you need to use a value outside of the transaction
|
||||
then you must use `copy()` to copy it to another byte slice.
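
For instance, a value that must outlive the transaction can be copied out like this (a minimal sketch; the bucket and key names are illustrative):

```go
var answer []byte
err := db.View(func(tx *bolt.Tx) error {
	v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer"))
	if v != nil {
		// The slice returned by Get() is only valid inside this transaction,
		// so copy it into memory we own before returning.
		answer = make([]byte, len(v))
		copy(answer, v)
	}
	return nil
})
if err != nil {
	log.Fatal(err)
}
fmt.Printf("The answer is still readable here: %s\n", answer)
```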
|
||||
|
||||
|
||||
### Autoincrementing integer for the bucket
|
||||
By using the `NextSequence()` function, you can let Bolt determine a sequence
|
||||
which can be used as the unique identifier for your key/value pairs. See the
|
||||
example below.
|
||||
|
||||
```go
|
||||
// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
|
||||
func (s *Store) CreateUser(u *User) error {
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
// Retrieve the users bucket.
|
||||
// This should be created when the DB is first opened.
|
||||
b := tx.Bucket([]byte("users"))
|
||||
|
||||
// Generate ID for the user.
|
||||
// This returns an error only if the Tx is closed or not writeable.
|
||||
// That can't happen in an Update() call so I ignore the error check.
|
||||
id, _ := b.NextSequence()
|
||||
u.ID = int(id)
|
||||
|
||||
// Marshal user data into bytes.
|
||||
buf, err := json.Marshal(u)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Persist bytes to users bucket.
|
||||
return b.Put(itob(u.ID), buf)
|
||||
})
|
||||
}
|
||||
|
||||
// itob returns an 8-byte big endian representation of v.
|
||||
func itob(v int) []byte {
|
||||
b := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(b, uint64(v))
|
||||
return b
|
||||
}
|
||||
|
||||
type User struct {
|
||||
ID int
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
### Iterating over keys
|
||||
|
||||
Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
|
||||
iteration over these keys extremely fast. To iterate over keys we'll use a
|
||||
`Cursor`:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
// Assume bucket exists and has keys
|
||||
b := tx.Bucket([]byte("MyBucket"))
|
||||
|
||||
c := b.Cursor()
|
||||
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
fmt.Printf("key=%s, value=%s\n", k, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
The cursor allows you to move to a specific point in the list of keys and move
|
||||
forward or backward through the keys one at a time.
|
||||
|
||||
The following functions are available on the cursor:
|
||||
|
||||
```
|
||||
First() Move to the first key.
|
||||
Last() Move to the last key.
|
||||
Seek() Move to a specific key.
|
||||
Next() Move to the next key.
|
||||
Prev() Move to the previous key.
|
||||
```
|
||||
|
||||
Each of those functions has a return signature of `(key []byte, value []byte)`.
|
||||
When you have iterated to the end of the cursor then `Next()` will return a
|
||||
`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
|
||||
before calling `Next()` or `Prev()`. If you do not seek to a position then
|
||||
these functions will return a `nil` key.
|
||||
|
||||
During iteration, if the key is non-`nil` but the value is `nil`, that means
|
||||
the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
|
||||
access the sub-bucket.
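
A minimal sketch of that check during iteration (the bucket name is illustrative, and the bucket is assumed to exist):

```go
db.View(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	c := b.Cursor()

	for k, v := c.First(); k != nil; k, v = c.Next() {
		if v == nil {
			// A nil value means k names a nested bucket rather than a plain key.
			sub := b.Bucket(k)
			fmt.Printf("nested bucket %s holds %d keys\n", k, sub.Stats().KeyN)
			continue
		}
		fmt.Printf("key=%s, value=%s\n", k, v)
	}

	return nil
})
```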
|
||||
|
||||
|
||||
#### Prefix scans
|
||||
|
||||
To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
// Assume bucket exists and has keys
|
||||
c := tx.Bucket([]byte("MyBucket")).Cursor()
|
||||
|
||||
prefix := []byte("1234")
|
||||
for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
|
||||
fmt.Printf("key=%s, value=%s\n", k, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
#### Range scans
|
||||
|
||||
Another common use case is scanning over a range such as a time range. If you
|
||||
use a sortable time encoding such as RFC3339 then you can query a specific
|
||||
date range like this:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
// Assume our events bucket exists and has RFC3339 encoded time keys.
|
||||
c := tx.Bucket([]byte("Events")).Cursor()
|
||||
|
||||
// Our time range spans the 90's decade.
|
||||
min := []byte("1990-01-01T00:00:00Z")
|
||||
max := []byte("2000-01-01T00:00:00Z")
|
||||
|
||||
// Iterate over the 90's.
|
||||
for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
|
||||
fmt.Printf("%s: %s\n", k, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
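
If nanosecond precision is needed, a layout with a fixed number of fractional digits keeps the encoded keys the same length and therefore sortable; a small sketch of the idea:

```go
// RFC3339Nano omits trailing zeros, so encoded times vary in length and no
// longer sort correctly as bytes. Padding the fraction to nine digits
// restores a fixed width.
const sortableNano = "2006-01-02T15:04:05.000000000Z07:00"

key := []byte(time.Now().UTC().Format(sortableNano))
fmt.Printf("sortable key: %s\n", key)
```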
|
||||
|
||||
|
||||
#### ForEach()
|
||||
|
||||
You can also use the function `ForEach()` if you know you'll be iterating over
|
||||
all the keys in a bucket:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
// Assume bucket exists and has keys
|
||||
b := tx.Bucket([]byte("MyBucket"))
|
||||
|
||||
b.ForEach(func(k, v []byte) error {
|
||||
fmt.Printf("key=%s, value=%s\n", k, v)
|
||||
return nil
|
||||
})
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
Please note that keys and values in `ForEach()` are only valid while
|
||||
the transaction is open. If you need to use a key or value outside of
|
||||
the transaction, you must use `copy()` to copy it to another byte
|
||||
slice.
|
||||
|
||||
### Nested buckets
|
||||
|
||||
You can also store a bucket in a key to create nested buckets. The API is the
|
||||
same as the bucket management API on the `DB` object:
|
||||
|
||||
```go
|
||||
func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
|
||||
func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
|
||||
func (*Bucket) DeleteBucket(key []byte) error
|
||||
```
|
||||
|
||||
Say you had a multi-tenant application where the root-level bucket was the account bucket. Inside this bucket is a sequence of accounts, which are themselves buckets. Inside each account bucket you could have many buckets pertaining to the account itself (Users, Notes, etc.), isolating the information into logical groupings.
|
||||
|
||||
```go
|
||||
|
||||
// createUser creates a new user in the given account.
|
||||
func createUser(accountID int, u *User) error {
|
||||
// Start the transaction.
|
||||
tx, err := db.Begin(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
// Retrieve the root bucket for the account.
|
||||
// Assume this has already been created when the account was set up.
|
||||
root := tx.Bucket([]byte(strconv.FormatUint(uint64(accountID), 10)))
|
||||
|
||||
// Setup the users bucket.
|
||||
bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Generate an ID for the new user.
|
||||
userID, err := bkt.NextSequence()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
u.ID = userID
|
||||
|
||||
// Marshal and save the encoded user.
|
||||
if buf, err := json.Marshal(u); err != nil {
|
||||
return err
|
||||
} else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Commit the transaction.
|
||||
if err := tx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
### Database backups
|
||||
|
||||
Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
|
||||
function to write a consistent view of the database to a writer. If you call
|
||||
this from a read-only transaction, it will perform a hot backup and not block
|
||||
your other database reads and writes.
|
||||
|
||||
By default, it will use a regular file handle which will utilize the operating
|
||||
system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
|
||||
documentation for information about optimizing for larger-than-RAM datasets.
|
||||
|
||||
One common use case is to backup over HTTP so you can use tools like `cURL` to
|
||||
do database backups:
|
||||
|
||||
```go
|
||||
func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
|
||||
err := db.View(func(tx *bolt.Tx) error {
|
||||
w.Header().Set("Content-Type", "application/octet-stream")
|
||||
w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
|
||||
w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
|
||||
_, err := tx.WriteTo(w)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Then you can backup using this command:
|
||||
|
||||
```sh
|
||||
$ curl http://localhost/backup > my.db
|
||||
```
|
||||
|
||||
Or you can open your browser to `http://localhost/backup` and it will download
|
||||
automatically.
|
||||
|
||||
If you want to backup to another file you can use the `Tx.CopyFile()` helper
|
||||
function.
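For instance, a hot backup to another file on disk might look like this sketch (the destination path is illustrative):

```go
err := db.View(func(tx *bolt.Tx) error {
	// Write a consistent snapshot of the database to the given path.
	return tx.CopyFile("/backups/my.db", 0600)
})
if err != nil {
	log.Fatal(err)
}
```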
|
||||
|
||||
|
||||
### Statistics
|
||||
|
||||
The database keeps a running count of many of the internal operations it
|
||||
performs so you can better understand what's going on. By grabbing a snapshot
|
||||
of these stats at two points in time we can see what operations were performed
|
||||
in that time range.
|
||||
|
||||
For example, we could start a goroutine to log stats every 10 seconds:
|
||||
|
||||
```go
|
||||
go func() {
|
||||
// Grab the initial stats.
|
||||
prev := db.Stats()
|
||||
|
||||
for {
|
||||
// Wait for 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
// Grab the current stats and diff them.
|
||||
stats := db.Stats()
|
||||
diff := stats.Sub(&prev)
|
||||
|
||||
// Encode stats to JSON and print to STDERR.
|
||||
json.NewEncoder(os.Stderr).Encode(diff)
|
||||
|
||||
// Save stats for the next loop.
|
||||
prev = stats
|
||||
}
|
||||
}()
|
||||
```
|
||||
|
||||
It's also useful to pipe these stats to a service such as statsd for monitoring
|
||||
or to provide an HTTP endpoint that will perform a fixed-length sample.
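A sketch of such a fixed-length sample endpoint (the handler name and the ten-second window are illustrative):

```go
func statsHandleFunc(w http.ResponseWriter, req *http.Request) {
	// Take two snapshots ten seconds apart and report the difference.
	prev := db.Stats()
	time.Sleep(10 * time.Second)
	diff := db.Stats().Sub(&prev)

	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(diff)
}
```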
|
||||
|
||||
|
||||
### Read-Only Mode
|
||||
|
||||
Sometimes it is useful to create a shared, read-only Bolt database. To do this,
|
||||
set the `Options.ReadOnly` flag when opening your database. Read-only mode
|
||||
uses a shared lock to allow multiple processes to read from the database but
|
||||
it will block any processes from opening the database in read-write mode.
|
||||
|
||||
```go
|
||||
db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
```
|
||||
|
||||
### Mobile Use (iOS/Android)
|
||||
|
||||
Bolt is able to run on mobile devices by leveraging the binding feature of the
|
||||
[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
|
||||
contain your database logic and a reference to a `*bolt.DB` with an initializing
|
||||
constructor that takes in a filepath where the database file will be stored.
|
||||
Neither Android nor iOS requires extra permissions or cleanup from using this method.
|
||||
|
||||
```go
|
||||
func NewBoltDB(filepath string) *BoltDB {
|
||||
db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
return &BoltDB{db}
|
||||
}
|
||||
|
||||
type BoltDB struct {
|
||||
db *bolt.DB
|
||||
...
|
||||
}
|
||||
|
||||
func (b *BoltDB) Path() string {
|
||||
return b.db.Path()
|
||||
}
|
||||
|
||||
func (b *BoltDB) Close() {
|
||||
b.db.Close()
|
||||
}
|
||||
```
|
||||
|
||||
Database logic should be defined as methods on this wrapper struct.
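For example, a sketch of one such method (the bucket name `notes` and the method itself are illustrative):

```go
func (b *BoltDB) SaveNote(id, text string) error {
	return b.db.Update(func(tx *bolt.Tx) error {
		// Create the bucket on first use, then store the note under its id.
		bkt, err := tx.CreateBucketIfNotExists([]byte("notes"))
		if err != nil {
			return err
		}
		return bkt.Put([]byte(id), []byte(text))
	})
}
```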
|
||||
|
||||
To initialize this struct from the native language (both platforms now sync
|
||||
their local storage to the cloud, so these snippets also disable that functionality for the
|
||||
database file):
|
||||
|
||||
#### Android
|
||||
|
||||
```java
|
||||
String path;
|
||||
if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.LOLLIPOP) {
|
||||
path = getNoBackupFilesDir().getAbsolutePath();
|
||||
} else {
|
||||
path = getFilesDir().getAbsolutePath();
|
||||
}
|
||||
Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path);
|
||||
```
|
||||
|
||||
#### iOS
|
||||
|
||||
```objc
|
||||
- (void)demo {
|
||||
NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
|
||||
NSUserDomainMask,
|
||||
YES) objectAtIndex:0];
|
||||
GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
|
||||
[self addSkipBackupAttributeToItemAtPath:demo.path];
|
||||
//Some DB Logic would go here
|
||||
[demo close];
|
||||
}
|
||||
|
||||
- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
|
||||
{
|
||||
NSURL* URL= [NSURL fileURLWithPath: filePathString];
|
||||
assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
|
||||
|
||||
NSError *error = nil;
|
||||
BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
|
||||
forKey: NSURLIsExcludedFromBackupKey error: &error];
|
||||
if(!success){
|
||||
NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
|
||||
}
|
||||
return success;
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## Resources
|
||||
|
||||
For more information on getting started with Bolt, check out the following articles:
|
||||
|
||||
* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
|
||||
* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
|
||||
|
||||
|
||||
## Comparison with other databases
|
||||
|
||||
### Postgres, MySQL, & other relational databases
|
||||
|
||||
Relational databases structure data into rows and are only accessible through
|
||||
the use of SQL. This approach provides flexibility in how you store and query
|
||||
your data but also incurs overhead in parsing and planning SQL statements. Bolt
|
||||
accesses all data by a byte slice key. This makes Bolt fast to read and write
|
||||
data by key but provides no built-in support for joining values together.
|
||||
|
||||
Most relational databases (with the exception of SQLite) are standalone servers
|
||||
that run separately from your application. This gives your systems
|
||||
flexibility to connect multiple application servers to a single database
|
||||
server but also adds overhead in serializing and transporting data over the
|
||||
network. Bolt runs as a library included in your application so all data access
|
||||
has to go through your application's process. This brings data closer to your
|
||||
application but limits multi-process access to the data.
|
||||
|
||||
|
||||
### LevelDB, RocksDB
|
||||
|
||||
LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
|
||||
they are libraries bundled into the application, however, their underlying
|
||||
structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
|
||||
random writes by using a write ahead log and multi-tiered, sorted files called
|
||||
SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
|
||||
have trade-offs.
|
||||
|
||||
If you require a high random write throughput (>10,000 w/sec) or you need to use
|
||||
spinning disks then LevelDB could be a good choice. If your application is
|
||||
read-heavy or does a lot of range scans then Bolt could be a good choice.
|
||||
|
||||
One other important consideration is that LevelDB does not have transactions.
|
||||
It supports batch writing of key/value pairs and it supports read snapshots
|
||||
but it will not give you the ability to do a compare-and-swap operation safely.
|
||||
Bolt supports fully serializable ACID transactions.
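For example, a compare-and-swap can be done safely inside a single Bolt read-write transaction. A sketch (the bucket, key, and values are illustrative):

```go
err := db.Update(func(tx *bolt.Tx) error {
	bkt := tx.Bucket([]byte("config"))
	// Only perform the swap if the current value is still what we expect.
	if !bytes.Equal(bkt.Get([]byte("owner")), []byte("alice")) {
		return errors.New("owner changed, aborting swap")
	}
	return bkt.Put([]byte("owner"), []byte("bob"))
})
```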
|
||||
|
||||
|
||||
### LMDB
|
||||
|
||||
Bolt was originally a port of LMDB so it is architecturally similar. Both use
|
||||
a B+tree, have ACID semantics with fully serializable transactions, and support
|
||||
lock-free MVCC using a single writer and multiple readers.
|
||||
|
||||
The two projects have somewhat diverged. LMDB heavily focuses on raw performance
|
||||
while Bolt has focused on simplicity and ease of use. For example, LMDB allows
|
||||
several unsafe actions such as direct writes for the sake of performance. Bolt
|
||||
opts to disallow actions which can leave the database in a corrupted state. The
|
||||
only exception to this in Bolt is `DB.NoSync`.
|
||||
|
||||
There are also a few differences in API. LMDB requires a maximum mmap size when
|
||||
opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
|
||||
automatically. LMDB overloads the getter and setter functions with multiple
|
||||
flags whereas Bolt splits these specialized cases into their own functions.
|
||||
|
||||
|
||||
## Caveats & Limitations
|
||||
|
||||
It's important to pick the right tool for the job and Bolt is no exception.
|
||||
Here are a few things to note when evaluating and using Bolt:
|
||||
|
||||
* Bolt is good for read intensive workloads. Sequential write performance is
|
||||
also fast but random writes can be slow. You can use `DB.Batch()` or add a
|
||||
write-ahead log to help mitigate this issue.
|
||||
|
||||
* Bolt uses a B+tree internally so there can be a lot of random page access.
|
||||
SSDs provide a significant performance boost over spinning disks.
|
||||
|
||||
* Try to avoid long running read transactions. Bolt uses copy-on-write so
|
||||
old pages cannot be reclaimed while an old transaction is using them.
|
||||
|
||||
* Byte slices returned from Bolt are only valid during a transaction. Once the
|
||||
transaction has been committed or rolled back then the memory they point to
|
||||
can be reused by a new page or can be unmapped from virtual memory and you'll
|
||||
see an `unexpected fault address` panic when accessing it.
|
||||
|
||||
* Bolt uses an exclusive write lock on the database file so it cannot be
|
||||
shared by multiple processes.
|
||||
|
||||
* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
|
||||
buckets that have random inserts will cause your database to have very poor
|
||||
page utilization.
|
||||
|
||||
* Use larger buckets in general. Smaller buckets cause poor page utilization
|
||||
once they become larger than the page size (typically 4KB).
|
||||
|
||||
* Bulk loading a lot of random writes into a new bucket can be slow as the
|
||||
page will not split until the transaction is committed. Randomly inserting
|
||||
more than 100,000 key/value pairs into a single new bucket in a single
|
||||
transaction is not advised.
|
||||
|
||||
* Bolt uses a memory-mapped file so the underlying operating system handles the
|
||||
caching of the data. Typically, the OS will cache as much of the file as it
|
||||
can in memory and will release memory as needed to other processes. This means
|
||||
that Bolt can show very high memory usage when working with large databases.
|
||||
However, this is expected and the OS will release memory as needed. Bolt can
|
||||
handle databases much larger than the available physical RAM, provided its
|
||||
memory-map fits in the process virtual address space. It may be problematic
|
||||
on 32-bit systems.
|
||||
|
||||
* The data structures in the Bolt database are memory mapped so the data file
|
||||
will be endian specific. This means that you cannot copy a Bolt file from a
|
||||
little endian machine to a big endian machine and have it work. For most
|
||||
users this is not a concern since most modern CPUs are little endian.
|
||||
|
||||
* Because of the way pages are laid out on disk, Bolt cannot truncate data files
|
||||
and return free pages back to the disk. Instead, Bolt maintains a free list
|
||||
of unused pages within its data file. These free pages can be reused by later
|
||||
transactions. This works well for many use cases as databases generally tend
|
||||
to grow. However, it's important to note that deleting large chunks of data
|
||||
will not allow you to reclaim that space on disk.
|
||||
|
||||
For more information on page allocation, [see this comment][page-allocation].
|
||||
|
||||
[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
|
||||
|
||||
|
||||
## Reading the Source
|
||||
|
||||
Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
|
||||
transactional key/value database so it can be a good starting point for people
|
||||
interested in how databases work.
|
||||
|
||||
The best places to start are the main entry points into Bolt:
|
||||
|
||||
- `Open()` - Initializes the reference to the database. It's responsible for
|
||||
creating the database if it doesn't exist, obtaining an exclusive lock on the
|
||||
file, reading the meta pages, & memory-mapping the file.
|
||||
|
||||
- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
|
||||
value of the `writable` argument. This requires briefly obtaining the "meta"
|
||||
lock to keep track of open transactions. Only one read-write transaction can
|
||||
exist at a time so the "rwlock" is acquired during the life of a read-write
|
||||
transaction.
|
||||
|
||||
- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
|
||||
arguments, a cursor is used to traverse the B+tree to the page and position
|
||||
where the key & value will be written. Once the position is found, the bucket
|
||||
materializes the underlying page and the page's parent pages into memory as
|
||||
"nodes". These nodes are where mutations occur during read-write transactions.
|
||||
These changes get flushed to disk during commit.
|
||||
|
||||
- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
|
||||
to move to the page & position of a key/value pair. During a read-only
|
||||
transaction, the key and value data is returned as a direct reference to the
|
||||
underlying mmap file so there's no allocation overhead. For read-write
|
||||
transactions, this data may reference the mmap file or one of the in-memory
|
||||
node values.
|
||||
|
||||
- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
|
||||
or in-memory nodes. It can seek to a specific key, move to the first or last
|
||||
value, or it can move forward or backward. The cursor handles the movement up
|
||||
and down the B+tree transparently to the end user.
|
||||
|
||||
- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
|
||||
into pages to be written to disk. Writing to disk then occurs in two phases.
|
||||
First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
|
||||
new meta page with an incremented transaction ID is written and another
|
||||
`fsync()` occurs. This two phase write ensures that partially written data
|
||||
pages are ignored in the event of a crash since the meta page pointing to them
|
||||
is never written. Partially written meta pages are invalidated because they
|
||||
are written with a checksum.
|
||||
|
||||
If you have additional notes that could be helpful for others, please submit
|
||||
them via pull request.
|
||||
|
||||
|
||||
## Other Projects Using Bolt
|
||||
|
||||
Below is a list of public, open source projects that use Bolt:
|
||||
|
||||
* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
|
||||
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
|
||||
* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
|
||||
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
|
||||
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
|
||||
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
|
||||
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
|
||||
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
|
||||
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
|
||||
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
|
||||
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
|
||||
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
|
||||
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
|
||||
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
|
||||
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
|
||||
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
|
||||
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
|
||||
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
|
||||
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
|
||||
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
|
||||
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
|
||||
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
|
||||
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
|
||||
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
|
||||
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, exposes a JSON-over-HTTP API, supports ISO 8601 duration notation, and handles dependent jobs.
|
||||
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
|
||||
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
|
||||
backed by boltdb.
|
||||
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
|
||||
simple tx and key scans.
|
||||
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
|
||||
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
|
||||
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
|
||||
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
|
||||
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
|
||||
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
|
||||
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
|
||||
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
|
||||
* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
|
||||
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
|
||||
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang, using BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
|
||||
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
|
||||
* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
|
||||
* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
|
||||
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
|
||||
* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
|
||||
* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies
|
||||
* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
|
||||
* [Ponzu CMS](https://ponzu-cms.org) - Headless CMS + automatic JSON API with auto-HTTPS, HTTP/2 Server Push, and flexible server framework.
|
||||
|
||||
If you are using Bolt in a project please send a pull request to add it to the list.
|
||||
10
vendor/github.com/boltdb/bolt/bolt_386.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
package bolt
|
||||
|
||||
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||
const maxMapSize = 0x7FFFFFFF // 2GB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0xFFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
10
vendor/github.com/boltdb/bolt/bolt_amd64.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
package bolt
|
||||
|
||||
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0x7FFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
28
vendor/github.com/boltdb/bolt/bolt_arm.go
generated
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
package bolt
|
||||
|
||||
import "unsafe"
|
||||
|
||||
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||
const maxMapSize = 0x7FFFFFFF // 2GB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0xFFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned bool
|
||||
|
||||
func init() {
|
||||
// Simple check to see whether this arch handles unaligned load/stores
|
||||
// correctly.
|
||||
|
||||
// ARM9 and older devices require load/stores to be from/to aligned
|
||||
// addresses. If not, the lower 2 bits are cleared and that address is
|
||||
// read in a jumbled up order.
|
||||
|
||||
// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
|
||||
|
||||
raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
|
||||
val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
|
||||
|
||||
brokenUnaligned = val != 0x11222211
|
||||
}
|
||||
12
vendor/github.com/boltdb/bolt/bolt_arm64.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// +build arm64
|
||||
|
||||
package bolt
|
||||
|
||||
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0x7FFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
10
vendor/github.com/boltdb/bolt/bolt_linux.go
generated
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// fdatasync flushes written data to a file descriptor.
|
||||
func fdatasync(db *DB) error {
|
||||
return syscall.Fdatasync(int(db.file.Fd()))
|
||||
}
|
||||
27
vendor/github.com/boltdb/bolt/bolt_openbsd.go
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
msAsync = 1 << iota // perform asynchronous writes
|
||||
msSync // perform synchronous writes
|
||||
msInvalidate // invalidate cached data
|
||||
)
|
||||
|
||||
func msync(db *DB) error {
|
||||
_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
|
||||
if errno != 0 {
|
||||
return errno
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func fdatasync(db *DB) error {
|
||||
if db.data != nil {
|
||||
return msync(db)
|
||||
}
|
||||
return db.file.Sync()
|
||||
}
|
||||
9
vendor/github.com/boltdb/bolt/bolt_ppc.go
generated
vendored
Normal file
@@ -0,0 +1,9 @@
|
||||
// +build ppc
|
||||
|
||||
package bolt
|
||||
|
||||
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||
const maxMapSize = 0x7FFFFFFF // 2GB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0xFFFFFFF
|
||||
12
vendor/github.com/boltdb/bolt/bolt_ppc64.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// +build ppc64
|
||||
|
||||
package bolt
|
||||
|
||||
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0x7FFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
12
vendor/github.com/boltdb/bolt/bolt_ppc64le.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// +build ppc64le
|
||||
|
||||
package bolt
|
||||
|
||||
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0x7FFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
12
vendor/github.com/boltdb/bolt/bolt_s390x.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// +build s390x
|
||||
|
||||
package bolt
|
||||
|
||||
// maxMapSize represents the largest mmap size supported by Bolt.
|
||||
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
|
||||
|
||||
// maxAllocSize is the size used when creating array pointers.
|
||||
const maxAllocSize = 0x7FFFFFFF
|
||||
|
||||
// Are unaligned load/stores broken on this arch?
|
||||
var brokenUnaligned = false
|
||||
89
vendor/github.com/boltdb/bolt/bolt_unix.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
// +build !windows,!plan9,!solaris
|
||||
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// flock acquires an advisory lock on a file descriptor.
|
||||
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
|
||||
var t time.Time
|
||||
for {
|
||||
// If we're beyond our timeout then return an error.
|
||||
// This can only occur after we've attempted a flock once.
|
||||
if t.IsZero() {
|
||||
t = time.Now()
|
||||
} else if timeout > 0 && time.Since(t) > timeout {
|
||||
return ErrTimeout
|
||||
}
|
||||
flag := syscall.LOCK_SH
|
||||
if exclusive {
|
||||
flag = syscall.LOCK_EX
|
||||
}
|
||||
|
||||
// Otherwise attempt to obtain an exclusive lock.
|
||||
err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
|
||||
if err == nil {
|
||||
return nil
|
||||
} else if err != syscall.EWOULDBLOCK {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for a bit and try again.
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
// funlock releases an advisory lock on a file descriptor.
|
||||
func funlock(db *DB) error {
|
||||
return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
|
||||
}
|
||||
|
||||
// mmap memory maps a DB's data file.
|
||||
func mmap(db *DB, sz int) error {
|
||||
// Map the data file to memory.
|
||||
b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Advise the kernel that the mmap is accessed randomly.
|
||||
if err := madvise(b, syscall.MADV_RANDOM); err != nil {
|
||||
return fmt.Errorf("madvise: %s", err)
|
||||
}
|
||||
|
||||
// Save the original byte slice and convert to a byte array pointer.
|
||||
db.dataref = b
|
||||
db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
|
||||
db.datasz = sz
|
||||
return nil
|
||||
}
|
||||
|
||||
// munmap unmaps a DB's data file from memory.
|
||||
func munmap(db *DB) error {
|
||||
// Ignore the unmap if we have no mapped data.
|
||||
if db.dataref == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unmap using the original byte slice.
|
||||
err := syscall.Munmap(db.dataref)
|
||||
db.dataref = nil
|
||||
db.data = nil
|
||||
db.datasz = 0
|
||||
return err
|
||||
}
|
||||
|
||||
// NOTE: This function is copied from stdlib because it is not available on darwin.
|
||||
func madvise(b []byte, advice int) (err error) {
|
||||
_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
|
||||
if e1 != 0 {
|
||||
err = e1
|
||||
}
|
||||
return
|
||||
}
|
||||
90
vendor/github.com/boltdb/bolt/bolt_unix_solaris.go
generated
vendored
Normal file
@@ -0,0 +1,90 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// flock acquires an advisory lock on a file descriptor.
|
||||
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
|
||||
var t time.Time
|
||||
for {
|
||||
// If we're beyond our timeout then return an error.
|
||||
// This can only occur after we've attempted a flock once.
|
||||
if t.IsZero() {
|
||||
t = time.Now()
|
||||
} else if timeout > 0 && time.Since(t) > timeout {
|
||||
return ErrTimeout
|
||||
}
|
||||
var lock syscall.Flock_t
|
||||
lock.Start = 0
|
||||
lock.Len = 0
|
||||
lock.Pid = 0
|
||||
lock.Whence = 0
|
||||
lock.Pid = 0
|
||||
if exclusive {
|
||||
lock.Type = syscall.F_WRLCK
|
||||
} else {
|
||||
lock.Type = syscall.F_RDLCK
|
||||
}
|
||||
err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
|
||||
if err == nil {
|
||||
return nil
|
||||
} else if err != syscall.EAGAIN {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for a bit and try again.
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
// funlock releases an advisory lock on a file descriptor.
|
||||
func funlock(db *DB) error {
|
||||
var lock syscall.Flock_t
|
||||
lock.Start = 0
|
||||
lock.Len = 0
|
||||
lock.Type = syscall.F_UNLCK
|
||||
lock.Whence = 0
|
||||
return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
|
||||
}
|
||||
|
||||
// mmap memory maps a DB's data file.
|
||||
func mmap(db *DB, sz int) error {
|
||||
// Map the data file to memory.
|
||||
b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Advise the kernel that the mmap is accessed randomly.
|
||||
if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
|
||||
return fmt.Errorf("madvise: %s", err)
|
||||
}
|
||||
|
||||
// Save the original byte slice and convert to a byte array pointer.
|
||||
db.dataref = b
|
||||
db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
|
||||
db.datasz = sz
|
||||
return nil
|
||||
}
|
||||
|
||||
// munmap unmaps a DB's data file from memory.
|
||||
func munmap(db *DB) error {
|
||||
// Ignore the unmap if we have no mapped data.
|
||||
if db.dataref == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Unmap using the original byte slice.
|
||||
err := unix.Munmap(db.dataref)
|
||||
db.dataref = nil
|
||||
db.data = nil
|
||||
db.datasz = 0
|
||||
return err
|
||||
}
|
||||
144
vendor/github.com/boltdb/bolt/bolt_windows.go
generated
vendored
Normal file
@@ -0,0 +1,144 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
|
||||
var (
|
||||
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||
procLockFileEx = modkernel32.NewProc("LockFileEx")
|
||||
procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
|
||||
)
|
||||
|
||||
const (
|
||||
lockExt = ".lock"
|
||||
|
||||
// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
|
||||
flagLockExclusive = 2
|
||||
flagLockFailImmediately = 1
|
||||
|
||||
// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
|
||||
errLockViolation syscall.Errno = 0x21
|
||||
)
|
||||
|
||||
func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
|
||||
r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
|
||||
if r == 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
|
||||
r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
|
||||
if r == 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// fdatasync flushes written data to a file descriptor.
|
||||
func fdatasync(db *DB) error {
|
||||
return db.file.Sync()
|
||||
}
|
||||
|
||||
// flock acquires an advisory lock on a file descriptor.
|
||||
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
|
||||
// Create a separate lock file on windows because a process
|
||||
// cannot share an exclusive lock on the same file. This is
|
||||
// needed during Tx.WriteTo().
|
||||
f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
db.lockfile = f
|
||||
|
||||
var t time.Time
|
||||
for {
|
||||
// If we're beyond our timeout then return an error.
|
||||
// This can only occur after we've attempted a flock once.
|
||||
if t.IsZero() {
|
||||
t = time.Now()
|
||||
} else if timeout > 0 && time.Since(t) > timeout {
|
||||
return ErrTimeout
|
||||
}
|
||||
|
||||
var flag uint32 = flagLockFailImmediately
|
||||
if exclusive {
|
||||
flag |= flagLockExclusive
|
||||
}
|
||||
|
||||
err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
|
||||
if err == nil {
|
||||
return nil
|
||||
} else if err != errLockViolation {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for a bit and try again.
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
}
|
||||
}
|
||||
|
||||
// funlock releases an advisory lock on a file descriptor.
|
||||
func funlock(db *DB) error {
|
||||
err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
|
||||
db.lockfile.Close()
|
||||
os.Remove(db.path + lockExt)
|
||||
return err
|
||||
}
|
||||
|
||||
// mmap memory maps a DB's data file.
|
||||
// Based on: https://github.com/edsrzf/mmap-go
|
||||
func mmap(db *DB, sz int) error {
|
||||
if !db.readOnly {
|
||||
// Truncate the database to the size of the mmap.
|
||||
if err := db.file.Truncate(int64(sz)); err != nil {
|
||||
return fmt.Errorf("truncate: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Open a file mapping handle.
|
||||
sizelo := uint32(sz >> 32)
|
||||
sizehi := uint32(sz) & 0xffffffff
|
||||
h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil)
|
||||
if h == 0 {
|
||||
return os.NewSyscallError("CreateFileMapping", errno)
|
||||
}
|
||||
|
||||
// Create the memory map.
|
||||
addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
|
||||
if addr == 0 {
|
||||
return os.NewSyscallError("MapViewOfFile", errno)
|
||||
}
|
||||
|
||||
// Close mapping handle.
|
||||
if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
|
||||
return os.NewSyscallError("CloseHandle", err)
|
||||
}
|
||||
|
||||
// Convert to a byte array.
|
||||
db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
|
||||
db.datasz = sz
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// munmap unmaps a pointer from a file.
|
||||
// Based on: https://github.com/edsrzf/mmap-go
|
||||
func munmap(db *DB) error {
|
||||
if db.data == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
addr := (uintptr)(unsafe.Pointer(&db.data[0]))
|
||||
if err := syscall.UnmapViewOfFile(addr); err != nil {
|
||||
return os.NewSyscallError("UnmapViewOfFile", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
8
vendor/github.com/boltdb/bolt/boltsync_unix.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
|
||||
// +build !windows,!plan9,!linux,!openbsd
|
||||
|
||||
package bolt
|
||||
|
||||
// fdatasync flushes written data to a file descriptor.
|
||||
func fdatasync(db *DB) error {
|
||||
return db.file.Sync()
|
||||
}
|
||||
777
vendor/github.com/boltdb/bolt/bucket.go
generated
vendored
Normal file
@@ -0,0 +1,777 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// MaxKeySize is the maximum length of a key, in bytes.
|
||||
MaxKeySize = 32768
|
||||
|
||||
// MaxValueSize is the maximum length of a value, in bytes.
|
||||
MaxValueSize = (1 << 31) - 2
|
||||
)
|
||||
|
||||
const (
|
||||
maxUint = ^uint(0)
|
||||
minUint = 0
|
||||
maxInt = int(^uint(0) >> 1)
|
||||
minInt = -maxInt - 1
|
||||
)
|
||||
|
||||
const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
|
||||
|
||||
const (
|
||||
minFillPercent = 0.1
|
||||
maxFillPercent = 1.0
|
||||
)
|
||||
|
||||
// DefaultFillPercent is the percentage that split pages are filled.
|
||||
// This value can be changed by setting Bucket.FillPercent.
|
||||
const DefaultFillPercent = 0.5
|
||||
|
||||
// Bucket represents a collection of key/value pairs inside the database.
|
||||
type Bucket struct {
|
||||
*bucket
|
||||
tx *Tx // the associated transaction
|
||||
buckets map[string]*Bucket // subbucket cache
|
||||
page *page // inline page reference
|
||||
rootNode *node // materialized node for the root page.
|
||||
nodes map[pgid]*node // node cache
|
||||
|
||||
// Sets the threshold for filling nodes when they split. By default,
|
||||
// the bucket will fill to 50% but it can be useful to increase this
|
||||
// amount if you know that your write workloads are mostly append-only.
|
||||
//
|
||||
// This is non-persisted across transactions so it must be set in every Tx.
|
||||
FillPercent float64
|
||||
}
|
||||
|
||||
// bucket represents the on-file representation of a bucket.
|
||||
// This is stored as the "value" of a bucket key. If the bucket is small enough,
|
||||
// then its root page can be stored inline in the "value", after the bucket
|
||||
// header. In the case of inline buckets, the "root" will be 0.
|
||||
type bucket struct {
|
||||
root pgid // page id of the bucket's root-level page
|
||||
sequence uint64 // monotonically incrementing, used by NextSequence()
|
||||
}
|
||||
|
||||
// newBucket returns a new bucket associated with a transaction.
|
||||
func newBucket(tx *Tx) Bucket {
|
||||
var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
|
||||
if tx.writable {
|
||||
b.buckets = make(map[string]*Bucket)
|
||||
b.nodes = make(map[pgid]*node)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Tx returns the tx of the bucket.
|
||||
func (b *Bucket) Tx() *Tx {
|
||||
return b.tx
|
||||
}
|
||||
|
||||
// Root returns the root of the bucket.
|
||||
func (b *Bucket) Root() pgid {
|
||||
return b.root
|
||||
}
|
||||
|
||||
// Writable returns whether the bucket is writable.
|
||||
func (b *Bucket) Writable() bool {
|
||||
return b.tx.writable
|
||||
}
|
||||
|
||||
// Cursor creates a cursor associated with the bucket.
|
||||
// The cursor is only valid as long as the transaction is open.
|
||||
// Do not use a cursor after the transaction is closed.
|
||||
func (b *Bucket) Cursor() *Cursor {
|
||||
// Update transaction statistics.
|
||||
b.tx.stats.CursorCount++
|
||||
|
||||
// Allocate and return a cursor.
|
||||
return &Cursor{
|
||||
bucket: b,
|
||||
stack: make([]elemRef, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// Bucket retrieves a nested bucket by name.
|
||||
// Returns nil if the bucket does not exist.
|
||||
// The bucket instance is only valid for the lifetime of the transaction.
|
||||
func (b *Bucket) Bucket(name []byte) *Bucket {
|
||||
if b.buckets != nil {
|
||||
if child := b.buckets[string(name)]; child != nil {
|
||||
return child
|
||||
}
|
||||
}
|
||||
|
||||
// Move cursor to key.
|
||||
c := b.Cursor()
|
||||
k, v, flags := c.seek(name)
|
||||
|
||||
// Return nil if the key doesn't exist or it is not a bucket.
|
||||
if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Otherwise create a bucket and cache it.
|
||||
var child = b.openBucket(v)
|
||||
if b.buckets != nil {
|
||||
b.buckets[string(name)] = child
|
||||
}
|
||||
|
||||
return child
|
||||
}
|
||||
|
||||
// Helper method that re-interprets a sub-bucket value
|
||||
// from a parent into a Bucket
|
||||
func (b *Bucket) openBucket(value []byte) *Bucket {
|
||||
var child = newBucket(b.tx)
|
||||
|
||||
// If unaligned load/stores are broken on this arch and value is
|
||||
// unaligned simply clone to an aligned byte array.
|
||||
unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
|
||||
|
||||
if unaligned {
|
||||
value = cloneBytes(value)
|
||||
}
|
||||
|
||||
// If this is a writable transaction then we need to copy the bucket entry.
|
||||
// Read-only transactions can point directly at the mmap entry.
|
||||
if b.tx.writable && !unaligned {
|
||||
child.bucket = &bucket{}
|
||||
*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
|
||||
} else {
|
||||
child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
|
||||
}
|
||||
|
||||
// Save a reference to the inline page if the bucket is inline.
|
||||
if child.root == 0 {
|
||||
child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
|
||||
}
|
||||
|
||||
return &child
|
||||
}
|
||||
|
||||
// CreateBucket creates a new bucket at the given key and returns the new bucket.
|
||||
// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
|
||||
// The bucket instance is only valid for the lifetime of the transaction.
|
||||
func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
|
||||
if b.tx.db == nil {
|
||||
return nil, ErrTxClosed
|
||||
} else if !b.tx.writable {
|
||||
return nil, ErrTxNotWritable
|
||||
} else if len(key) == 0 {
|
||||
return nil, ErrBucketNameRequired
|
||||
}
|
||||
|
||||
// Move cursor to correct position.
|
||||
c := b.Cursor()
|
||||
k, _, flags := c.seek(key)
|
||||
|
||||
// Return an error if there is an existing key.
|
||||
if bytes.Equal(key, k) {
|
||||
if (flags & bucketLeafFlag) != 0 {
|
||||
return nil, ErrBucketExists
|
||||
}
|
||||
return nil, ErrIncompatibleValue
|
||||
}
|
||||
|
||||
// Create empty, inline bucket.
|
||||
var bucket = Bucket{
|
||||
bucket: &bucket{},
|
||||
rootNode: &node{isLeaf: true},
|
||||
FillPercent: DefaultFillPercent,
|
||||
}
|
||||
var value = bucket.write()
|
||||
|
||||
// Insert into node.
|
||||
key = cloneBytes(key)
|
||||
c.node().put(key, key, value, 0, bucketLeafFlag)
|
||||
|
||||
// Since subbuckets are not allowed on inline buckets, we need to
|
||||
// dereference the inline page, if it exists. This will cause the bucket
|
||||
// to be treated as a regular, non-inline bucket for the rest of the tx.
|
||||
b.page = nil
|
||||
|
||||
return b.Bucket(key), nil
|
||||
}
|
||||
|
||||
// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
|
||||
// Returns an error if the bucket name is blank, or if the bucket name is too long.
|
||||
// The bucket instance is only valid for the lifetime of the transaction.
|
||||
func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
|
||||
child, err := b.CreateBucket(key)
|
||||
if err == ErrBucketExists {
|
||||
return b.Bucket(key), nil
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return child, nil
|
||||
}
|
||||
|
||||
// DeleteBucket deletes a bucket at the given key.
|
||||
// Returns an error if the bucket does not exists, or if the key represents a non-bucket value.
|
||||
func (b *Bucket) DeleteBucket(key []byte) error {
|
||||
if b.tx.db == nil {
|
||||
return ErrTxClosed
|
||||
} else if !b.Writable() {
|
||||
return ErrTxNotWritable
|
||||
}
|
||||
|
||||
// Move cursor to correct position.
|
||||
c := b.Cursor()
|
||||
k, _, flags := c.seek(key)
|
||||
|
||||
// Return an error if bucket doesn't exist or is not a bucket.
|
||||
if !bytes.Equal(key, k) {
|
||||
return ErrBucketNotFound
|
||||
} else if (flags & bucketLeafFlag) == 0 {
|
||||
return ErrIncompatibleValue
|
||||
}
|
||||
|
||||
// Recursively delete all child buckets.
|
||||
child := b.Bucket(key)
|
||||
err := child.ForEach(func(k, v []byte) error {
|
||||
if v == nil {
|
||||
if err := child.DeleteBucket(k); err != nil {
|
||||
return fmt.Errorf("delete bucket: %s", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove cached copy.
|
||||
delete(b.buckets, string(key))
|
||||
|
||||
// Release all bucket pages to freelist.
|
||||
child.nodes = nil
|
||||
child.rootNode = nil
|
||||
child.free()
|
||||
|
||||
// Delete the node if we have a matching key.
|
||||
c.node().del(key)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get retrieves the value for a key in the bucket.
|
||||
// Returns a nil value if the key does not exist or if the key is a nested bucket.
|
||||
// The returned value is only valid for the life of the transaction.
|
||||
func (b *Bucket) Get(key []byte) []byte {
|
||||
k, v, flags := b.Cursor().seek(key)
|
||||
|
||||
// Return nil if this is a bucket.
|
||||
if (flags & bucketLeafFlag) != 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If our target node isn't the same key as what's passed in then return nil.
|
||||
if !bytes.Equal(key, k) {
|
||||
return nil
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Put sets the value for a key in the bucket.
|
||||
// If the key exist then its previous value will be overwritten.
|
||||
// Supplied value must remain valid for the life of the transaction.
|
||||
// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
|
||||
func (b *Bucket) Put(key []byte, value []byte) error {
|
||||
if b.tx.db == nil {
|
||||
return ErrTxClosed
|
||||
} else if !b.Writable() {
|
||||
return ErrTxNotWritable
|
||||
} else if len(key) == 0 {
|
||||
return ErrKeyRequired
|
||||
} else if len(key) > MaxKeySize {
|
||||
return ErrKeyTooLarge
|
||||
} else if int64(len(value)) > MaxValueSize {
|
||||
return ErrValueTooLarge
|
||||
}
|
||||
|
||||
// Move cursor to correct position.
|
||||
c := b.Cursor()
|
||||
k, _, flags := c.seek(key)
|
||||
|
||||
// Return an error if there is an existing key with a bucket value.
|
||||
if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
|
||||
return ErrIncompatibleValue
|
||||
}
|
||||
|
||||
// Insert into node.
|
||||
key = cloneBytes(key)
|
||||
c.node().put(key, key, value, 0, 0)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Delete removes a key from the bucket.
|
||||
// If the key does not exist then nothing is done and a nil error is returned.
|
||||
// Returns an error if the bucket was created from a read-only transaction.
|
||||
func (b *Bucket) Delete(key []byte) error {
|
||||
if b.tx.db == nil {
|
||||
return ErrTxClosed
|
||||
} else if !b.Writable() {
|
||||
return ErrTxNotWritable
|
||||
}
|
||||
|
||||
// Move cursor to correct position.
|
||||
c := b.Cursor()
|
||||
_, _, flags := c.seek(key)
|
||||
|
||||
// Return an error if there is already existing bucket value.
|
||||
if (flags & bucketLeafFlag) != 0 {
|
||||
return ErrIncompatibleValue
|
||||
}
|
||||
|
||||
// Delete the node if we have a matching key.
|
||||
c.node().del(key)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sequence returns the current integer for the bucket without incrementing it.
|
||||
func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
|
||||
|
||||
// SetSequence updates the sequence number for the bucket.
|
||||
func (b *Bucket) SetSequence(v uint64) error {
|
||||
if b.tx.db == nil {
|
||||
return ErrTxClosed
|
||||
} else if !b.Writable() {
|
||||
return ErrTxNotWritable
|
||||
}
|
||||
|
||||
// Materialize the root node if it hasn't been already so that the
|
||||
// bucket will be saved during commit.
|
||||
if b.rootNode == nil {
|
||||
_ = b.node(b.root, nil)
|
||||
}
|
||||
|
||||
// Increment and return the sequence.
|
||||
b.bucket.sequence = v
|
||||
return nil
|
||||
}
|
||||
|
||||
// NextSequence returns an autoincrementing integer for the bucket.
|
||||
func (b *Bucket) NextSequence() (uint64, error) {
|
||||
if b.tx.db == nil {
|
||||
return 0, ErrTxClosed
|
||||
} else if !b.Writable() {
|
||||
return 0, ErrTxNotWritable
|
||||
}
|
||||
|
||||
// Materialize the root node if it hasn't been already so that the
|
||||
// bucket will be saved during commit.
|
||||
if b.rootNode == nil {
|
||||
_ = b.node(b.root, nil)
|
||||
}
|
||||
|
||||
// Increment and return the sequence.
|
||||
b.bucket.sequence++
|
||||
return b.bucket.sequence, nil
|
||||
}
|
||||
|
||||
// ForEach executes a function for each key/value pair in a bucket.
|
||||
// If the provided function returns an error then the iteration is stopped and
|
||||
// the error is returned to the caller. The provided function must not modify
|
||||
// the bucket; this will result in undefined behavior.
|
||||
func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
|
||||
if b.tx.db == nil {
|
||||
return ErrTxClosed
|
||||
}
|
||||
c := b.Cursor()
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
if err := fn(k, v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stat returns stats on a bucket.
|
||||
func (b *Bucket) Stats() BucketStats {
|
||||
var s, subStats BucketStats
|
||||
pageSize := b.tx.db.pageSize
|
||||
s.BucketN += 1
|
||||
if b.root == 0 {
|
||||
s.InlineBucketN += 1
|
||||
}
|
||||
b.forEachPage(func(p *page, depth int) {
|
||||
if (p.flags & leafPageFlag) != 0 {
|
||||
s.KeyN += int(p.count)
|
||||
|
||||
// used totals the used bytes for the page
|
||||
used := pageHeaderSize
|
||||
|
||||
if p.count != 0 {
|
||||
// If page has any elements, add all element headers.
|
||||
used += leafPageElementSize * int(p.count-1)
|
||||
|
||||
// Add all element key, value sizes.
|
||||
// The computation takes advantage of the fact that the position
|
||||
// of the last element's key/value equals to the total of the sizes
|
||||
// of all previous elements' keys and values.
|
||||
// It also includes the last element's header.
|
||||
lastElement := p.leafPageElement(p.count - 1)
|
||||
used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
|
||||
}
|
||||
|
||||
if b.root == 0 {
|
||||
// For inlined bucket just update the inline stats
|
||||
s.InlineBucketInuse += used
|
||||
} else {
|
||||
// For non-inlined bucket update all the leaf stats
|
||||
s.LeafPageN++
|
||||
s.LeafInuse += used
|
||||
s.LeafOverflowN += int(p.overflow)
|
||||
|
||||
// Collect stats from sub-buckets.
|
||||
// Do that by iterating over all element headers
|
||||
// looking for the ones with the bucketLeafFlag.
|
||||
for i := uint16(0); i < p.count; i++ {
|
||||
e := p.leafPageElement(i)
|
||||
if (e.flags & bucketLeafFlag) != 0 {
|
||||
// For any bucket element, open the element value
|
||||
// and recursively call Stats on the contained bucket.
|
||||
subStats.Add(b.openBucket(e.value()).Stats())
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if (p.flags & branchPageFlag) != 0 {
|
||||
s.BranchPageN++
|
||||
lastElement := p.branchPageElement(p.count - 1)
|
||||
|
||||
// used totals the used bytes for the page
|
||||
// Add header and all element headers.
|
||||
used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
|
||||
|
||||
// Add size of all keys and values.
|
||||
// Again, use the fact that last element's position equals to
|
||||
// the total of key, value sizes of all previous elements.
|
||||
used += int(lastElement.pos + lastElement.ksize)
|
||||
s.BranchInuse += used
|
||||
s.BranchOverflowN += int(p.overflow)
|
||||
}
|
||||
|
||||
// Keep track of maximum page depth.
|
||||
if depth+1 > s.Depth {
|
||||
s.Depth = (depth + 1)
|
||||
}
|
||||
})
|
||||
|
||||
// Alloc stats can be computed from page counts and pageSize.
|
||||
s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
|
||||
s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
|
||||
|
||||
// Add the max depth of sub-buckets to get total nested depth.
|
||||
s.Depth += subStats.Depth
|
||||
// Add the stats for all sub-buckets
|
||||
s.Add(subStats)
|
||||
return s
|
||||
}
|
||||
|
||||
// forEachPage iterates over every page in a bucket, including inline pages.
|
||||
func (b *Bucket) forEachPage(fn func(*page, int)) {
|
||||
// If we have an inline page then just use that.
|
||||
if b.page != nil {
|
||||
fn(b.page, 0)
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise traverse the page hierarchy.
|
||||
b.tx.forEachPage(b.root, 0, fn)
|
||||
}
|
||||
|
||||
// forEachPageNode iterates over every page (or node) in a bucket.
|
||||
// This also includes inline pages.
|
||||
func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
|
||||
// If we have an inline page or root node then just use that.
|
||||
if b.page != nil {
|
||||
fn(b.page, nil, 0)
|
||||
return
|
||||
}
|
||||
b._forEachPageNode(b.root, 0, fn)
|
||||
}
|
||||
|
||||
func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
|
||||
var p, n = b.pageNode(pgid)
|
||||
|
||||
// Execute function.
|
||||
fn(p, n, depth)
|
||||
|
||||
// Recursively loop over children.
|
||||
if p != nil {
|
||||
if (p.flags & branchPageFlag) != 0 {
|
||||
for i := 0; i < int(p.count); i++ {
|
||||
elem := p.branchPageElement(uint16(i))
|
||||
b._forEachPageNode(elem.pgid, depth+1, fn)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if !n.isLeaf {
|
||||
for _, inode := range n.inodes {
|
||||
b._forEachPageNode(inode.pgid, depth+1, fn)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// spill writes all the nodes for this bucket to dirty pages.
|
||||
func (b *Bucket) spill() error {
|
||||
// Spill all child buckets first.
|
||||
for name, child := range b.buckets {
|
||||
// If the child bucket is small enough and it has no child buckets then
|
||||
// write it inline into the parent bucket's page. Otherwise spill it
|
||||
// like a normal bucket and make the parent value a pointer to the page.
|
||||
var value []byte
|
||||
if child.inlineable() {
|
||||
child.free()
|
||||
value = child.write()
|
||||
} else {
|
||||
if err := child.spill(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update the child bucket header in this bucket.
|
||||
value = make([]byte, unsafe.Sizeof(bucket{}))
|
||||
var bucket = (*bucket)(unsafe.Pointer(&value[0]))
|
||||
*bucket = *child.bucket
|
||||
}
|
||||
|
||||
// Skip writing the bucket if there are no materialized nodes.
|
||||
if child.rootNode == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// Update parent node.
|
||||
var c = b.Cursor()
|
||||
k, _, flags := c.seek([]byte(name))
|
||||
if !bytes.Equal([]byte(name), k) {
|
||||
panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
|
||||
}
|
||||
if flags&bucketLeafFlag == 0 {
|
||||
panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
|
||||
}
|
||||
c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
|
||||
}
|
||||
|
||||
// Ignore if there's not a materialized root node.
|
||||
if b.rootNode == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Spill nodes.
|
||||
if err := b.rootNode.spill(); err != nil {
|
||||
return err
|
||||
}
|
||||
b.rootNode = b.rootNode.root()
|
||||
|
||||
// Update the root node for this bucket.
|
||||
if b.rootNode.pgid >= b.tx.meta.pgid {
|
||||
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
|
||||
}
|
||||
b.root = b.rootNode.pgid
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// inlineable returns true if a bucket is small enough to be written inline
|
||||
// and if it contains no subbuckets. Otherwise returns false.
|
||||
func (b *Bucket) inlineable() bool {
|
||||
var n = b.rootNode
|
||||
|
||||
// Bucket must only contain a single leaf node.
|
||||
if n == nil || !n.isLeaf {
|
||||
return false
|
||||
}
|
||||
|
||||
// Bucket is not inlineable if it contains subbuckets or if it goes beyond
|
||||
// our threshold for inline bucket size.
|
||||
var size = pageHeaderSize
|
||||
for _, inode := range n.inodes {
|
||||
size += leafPageElementSize + len(inode.key) + len(inode.value)
|
||||
|
||||
if inode.flags&bucketLeafFlag != 0 {
|
||||
return false
|
||||
} else if size > b.maxInlineBucketSize() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns the maximum total size of a bucket to make it a candidate for inlining.
|
||||
func (b *Bucket) maxInlineBucketSize() int {
|
||||
return b.tx.db.pageSize / 4
|
||||
}
|
||||
|
||||
// write allocates and writes a bucket to a byte slice.
|
||||
func (b *Bucket) write() []byte {
|
||||
// Allocate the appropriate size.
|
||||
var n = b.rootNode
|
||||
var value = make([]byte, bucketHeaderSize+n.size())
|
||||
|
||||
// Write a bucket header.
|
||||
var bucket = (*bucket)(unsafe.Pointer(&value[0]))
|
||||
*bucket = *b.bucket
|
||||
|
||||
// Convert byte slice to a fake page and write the root node.
|
||||
var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
|
||||
n.write(p)
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
// rebalance attempts to balance all nodes.
|
||||
func (b *Bucket) rebalance() {
|
||||
for _, n := range b.nodes {
|
||||
n.rebalance()
|
||||
}
|
||||
for _, child := range b.buckets {
|
||||
child.rebalance()
|
||||
}
|
||||
}
|
||||
|
||||
// node creates a node from a page and associates it with a given parent.
|
||||
func (b *Bucket) node(pgid pgid, parent *node) *node {
|
||||
_assert(b.nodes != nil, "nodes map expected")
|
||||
|
||||
// Retrieve node if it's already been created.
|
||||
if n := b.nodes[pgid]; n != nil {
|
||||
return n
|
||||
}
|
||||
|
||||
// Otherwise create a node and cache it.
|
||||
n := &node{bucket: b, parent: parent}
|
||||
if parent == nil {
|
||||
b.rootNode = n
|
||||
} else {
|
||||
parent.children = append(parent.children, n)
|
||||
}
|
||||
|
||||
// Use the inline page if this is an inline bucket.
|
||||
var p = b.page
|
||||
if p == nil {
|
||||
p = b.tx.page(pgid)
|
||||
}
|
||||
|
||||
// Read the page into the node and cache it.
|
||||
n.read(p)
|
||||
b.nodes[pgid] = n
|
||||
|
||||
// Update statistics.
|
||||
b.tx.stats.NodeCount++
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
// free recursively frees all pages in the bucket.
|
||||
func (b *Bucket) free() {
|
||||
if b.root == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
var tx = b.tx
|
||||
b.forEachPageNode(func(p *page, n *node, _ int) {
|
||||
if p != nil {
|
||||
tx.db.freelist.free(tx.meta.txid, p)
|
||||
} else {
|
||||
n.free()
|
||||
}
|
||||
})
|
||||
b.root = 0
|
||||
}
|
||||
|
||||
// dereference removes all references to the old mmap.
|
||||
func (b *Bucket) dereference() {
|
||||
if b.rootNode != nil {
|
||||
b.rootNode.root().dereference()
|
||||
}
|
||||
|
||||
for _, child := range b.buckets {
|
||||
child.dereference()
|
||||
}
|
||||
}
|
||||
|
||||
// pageNode returns the in-memory node, if it exists.
|
||||
// Otherwise returns the underlying page.
|
||||
func (b *Bucket) pageNode(id pgid) (*page, *node) {
|
||||
// Inline buckets have a fake page embedded in their value so treat them
|
||||
// differently. We'll return the rootNode (if available) or the fake page.
|
||||
if b.root == 0 {
|
||||
if id != 0 {
|
||||
panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
|
||||
}
|
||||
if b.rootNode != nil {
|
||||
return nil, b.rootNode
|
||||
}
|
||||
return b.page, nil
|
||||
}
|
||||
|
||||
// Check the node cache for non-inline buckets.
|
||||
if b.nodes != nil {
|
||||
if n := b.nodes[id]; n != nil {
|
||||
return nil, n
|
||||
}
|
||||
}
|
||||
|
||||
// Finally lookup the page from the transaction if no node is materialized.
|
||||
return b.tx.page(id), nil
|
||||
}
|
||||
|
||||
// BucketStats records statistics about resources used by a bucket.
type BucketStats struct {
	// Page count statistics.
	BranchPageN     int // number of logical branch pages
	BranchOverflowN int // number of physical branch overflow pages
	LeafPageN       int // number of logical leaf pages
	LeafOverflowN   int // number of physical leaf overflow pages

	// Tree statistics.
	KeyN  int // number of key/value pairs
	Depth int // number of levels in B+tree

	// Page size utilization.
	BranchAlloc int // bytes allocated for physical branch pages
	BranchInuse int // bytes actually used for branch data
	LeafAlloc   int // bytes allocated for physical leaf pages
	LeafInuse   int // bytes actually used for leaf data

	// Bucket statistics
	BucketN           int // total number of buckets including the top bucket
	InlineBucketN     int // total number of inlined buckets
	InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
}
|
||||
func (s *BucketStats) Add(other BucketStats) {
|
||||
s.BranchPageN += other.BranchPageN
|
||||
s.BranchOverflowN += other.BranchOverflowN
|
||||
s.LeafPageN += other.LeafPageN
|
||||
s.LeafOverflowN += other.LeafOverflowN
|
||||
s.KeyN += other.KeyN
|
||||
if s.Depth < other.Depth {
|
||||
s.Depth = other.Depth
|
||||
}
|
||||
s.BranchAlloc += other.BranchAlloc
|
||||
s.BranchInuse += other.BranchInuse
|
||||
s.LeafAlloc += other.LeafAlloc
|
||||
s.LeafInuse += other.LeafInuse
|
||||
|
||||
s.BucketN += other.BucketN
|
||||
s.InlineBucketN += other.InlineBucketN
|
||||
s.InlineBucketInuse += other.InlineBucketInuse
|
||||
}
|
||||
|
||||
// cloneBytes returns a copy of a given slice.
|
||||
func cloneBytes(v []byte) []byte {
|
||||
var clone = make([]byte, len(v))
|
||||
copy(clone, v)
|
||||
return clone
|
||||
}
|
||||
400
vendor/github.com/boltdb/bolt/cursor.go
generated
vendored
Normal file
@@ -0,0 +1,400 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
|
||||
// Cursors see nested buckets with value == nil.
|
||||
// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
|
||||
//
|
||||
// Keys and values returned from the cursor are only valid for the life of the transaction.
|
||||
//
|
||||
// Changing data while traversing with a cursor may cause it to be invalidated
|
||||
// and return unexpected keys and/or values. You must reposition your cursor
|
||||
// after mutating data.
|
||||
type Cursor struct {
|
||||
bucket *Bucket
|
||||
stack []elemRef
|
||||
}
|
||||
|
||||
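The Cursor documentation above describes ordered traversal of a bucket inside an open transaction. As a brief editorial illustration (not part of the vendored cursor.go; it assumes an already opened *bolt.DB, the fmt package, the bolt import, and a bucket named "widgets"), a caller typically obtains a cursor from a bucket and walks it with First/Next:

// Editorial sketch, not part of the vendored file. Assumes an opened *bolt.DB
// that may contain a bucket named "widgets".
func dumpWidgets(db *bolt.DB) error {
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("widgets"))
		if b == nil {
			return nil // bucket not created yet
		}
		c := b.Cursor()
		// Keys come back in sorted order; nested buckets appear with a nil value.
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("%s = %s\n", k, v)
		}
		return nil
	})
}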
// Bucket returns the bucket that this cursor was created from.
|
||||
func (c *Cursor) Bucket() *Bucket {
|
||||
return c.bucket
|
||||
}
|
||||
|
||||
// First moves the cursor to the first item in the bucket and returns its key and value.
|
||||
// If the bucket is empty then a nil key and value are returned.
|
||||
// The returned key and value are only valid for the life of the transaction.
|
||||
func (c *Cursor) First() (key []byte, value []byte) {
|
||||
_assert(c.bucket.tx.db != nil, "tx closed")
|
||||
c.stack = c.stack[:0]
|
||||
p, n := c.bucket.pageNode(c.bucket.root)
|
||||
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
|
||||
c.first()
|
||||
|
||||
// If we land on an empty page then move to the next value.
|
||||
// https://github.com/boltdb/bolt/issues/450
|
||||
if c.stack[len(c.stack)-1].count() == 0 {
|
||||
c.next()
|
||||
}
|
||||
|
||||
k, v, flags := c.keyValue()
|
||||
if (flags & uint32(bucketLeafFlag)) != 0 {
|
||||
return k, nil
|
||||
}
|
||||
return k, v
|
||||
|
||||
}
|
||||
|
||||
// Last moves the cursor to the last item in the bucket and returns its key and value.
|
||||
// If the bucket is empty then a nil key and value are returned.
|
||||
// The returned key and value are only valid for the life of the transaction.
|
||||
func (c *Cursor) Last() (key []byte, value []byte) {
|
||||
_assert(c.bucket.tx.db != nil, "tx closed")
|
||||
c.stack = c.stack[:0]
|
||||
p, n := c.bucket.pageNode(c.bucket.root)
|
||||
ref := elemRef{page: p, node: n}
|
||||
ref.index = ref.count() - 1
|
||||
c.stack = append(c.stack, ref)
|
||||
c.last()
|
||||
k, v, flags := c.keyValue()
|
||||
if (flags & uint32(bucketLeafFlag)) != 0 {
|
||||
return k, nil
|
||||
}
|
||||
return k, v
|
||||
}
|
||||
|
||||
// Next moves the cursor to the next item in the bucket and returns its key and value.
|
||||
// If the cursor is at the end of the bucket then a nil key and value are returned.
|
||||
// The returned key and value are only valid for the life of the transaction.
|
||||
func (c *Cursor) Next() (key []byte, value []byte) {
|
||||
_assert(c.bucket.tx.db != nil, "tx closed")
|
||||
k, v, flags := c.next()
|
||||
if (flags & uint32(bucketLeafFlag)) != 0 {
|
||||
return k, nil
|
||||
}
|
||||
return k, v
|
||||
}
|
||||
|
||||
// Prev moves the cursor to the previous item in the bucket and returns its key and value.
|
||||
// If the cursor is at the beginning of the bucket then a nil key and value are returned.
|
||||
// The returned key and value are only valid for the life of the transaction.
|
||||
func (c *Cursor) Prev() (key []byte, value []byte) {
|
||||
_assert(c.bucket.tx.db != nil, "tx closed")
|
||||
|
||||
// Attempt to move back one element until we're successful.
|
||||
// Move up the stack as we hit the beginning of each page in our stack.
|
||||
for i := len(c.stack) - 1; i >= 0; i-- {
|
||||
elem := &c.stack[i]
|
||||
if elem.index > 0 {
|
||||
elem.index--
|
||||
break
|
||||
}
|
||||
c.stack = c.stack[:i]
|
||||
}
|
||||
|
||||
// If we've hit the end then return nil.
|
||||
if len(c.stack) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Move down the stack to find the last element of the last leaf under this branch.
|
||||
c.last()
|
||||
k, v, flags := c.keyValue()
|
||||
if (flags & uint32(bucketLeafFlag)) != 0 {
|
||||
return k, nil
|
||||
}
|
||||
return k, v
|
||||
}
|
||||
|
||||
// Seek moves the cursor to a given key and returns it.
|
||||
// If the key does not exist then the next key is used. If no keys
|
||||
// follow, a nil key is returned.
|
||||
// The returned key and value are only valid for the life of the transaction.
|
||||
func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
|
||||
k, v, flags := c.seek(seek)
|
||||
|
||||
// If we ended up after the last element of a page then move to the next one.
|
||||
if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
|
||||
k, v, flags = c.next()
|
||||
}
|
||||
|
||||
if k == nil {
|
||||
return nil, nil
|
||||
} else if (flags & uint32(bucketLeafFlag)) != 0 {
|
||||
return k, nil
|
||||
}
|
||||
return k, v
|
||||
}
|
||||
|
||||
// Delete removes the current key/value under the cursor from the bucket.
|
||||
// Delete fails if current key/value is a bucket or if the transaction is not writable.
|
||||
func (c *Cursor) Delete() error {
|
||||
if c.bucket.tx.db == nil {
|
||||
return ErrTxClosed
|
||||
} else if !c.bucket.Writable() {
|
||||
return ErrTxNotWritable
|
||||
}
|
||||
|
||||
key, _, flags := c.keyValue()
|
||||
// Return an error if current value is a bucket.
|
||||
if (flags & bucketLeafFlag) != 0 {
|
||||
return ErrIncompatibleValue
|
||||
}
|
||||
c.node().del(key)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// seek moves the cursor to a given key and returns it.
|
||||
// If the key does not exist then the next key is used.
|
||||
func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
|
||||
_assert(c.bucket.tx.db != nil, "tx closed")
|
||||
|
||||
// Start from root page/node and traverse to correct page.
|
||||
c.stack = c.stack[:0]
|
||||
c.search(seek, c.bucket.root)
|
||||
ref := &c.stack[len(c.stack)-1]
|
||||
|
||||
// If the cursor is pointing to the end of page/node then return nil.
|
||||
if ref.index >= ref.count() {
|
||||
return nil, nil, 0
|
||||
}
|
||||
|
||||
// If this is a bucket then return a nil value.
|
||||
return c.keyValue()
|
||||
}
|
||||
|
||||
// first moves the cursor to the first leaf element under the last page in the stack.
|
||||
func (c *Cursor) first() {
|
||||
for {
|
||||
// Exit when we hit a leaf page.
|
||||
var ref = &c.stack[len(c.stack)-1]
|
||||
if ref.isLeaf() {
|
||||
break
|
||||
}
|
||||
|
||||
// Keep adding pages pointing to the first element to the stack.
|
||||
var pgid pgid
|
||||
if ref.node != nil {
|
||||
pgid = ref.node.inodes[ref.index].pgid
|
||||
} else {
|
||||
pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
|
||||
}
|
||||
p, n := c.bucket.pageNode(pgid)
|
||||
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
|
||||
}
|
||||
}
|
||||
|
||||
// last moves the cursor to the last leaf element under the last page in the stack.
|
||||
func (c *Cursor) last() {
|
||||
for {
|
||||
// Exit when we hit a leaf page.
|
||||
ref := &c.stack[len(c.stack)-1]
|
||||
if ref.isLeaf() {
|
||||
break
|
||||
}
|
||||
|
||||
// Keep adding pages pointing to the last element in the stack.
|
||||
var pgid pgid
|
||||
if ref.node != nil {
|
||||
pgid = ref.node.inodes[ref.index].pgid
|
||||
} else {
|
||||
pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
|
||||
}
|
||||
p, n := c.bucket.pageNode(pgid)
|
||||
|
||||
var nextRef = elemRef{page: p, node: n}
|
||||
nextRef.index = nextRef.count() - 1
|
||||
c.stack = append(c.stack, nextRef)
|
||||
}
|
||||
}
|
||||
|
||||
// next moves to the next leaf element and returns the key and value.
|
||||
// If the cursor is at the last leaf element then it stays there and returns nil.
|
||||
func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
|
||||
for {
|
||||
// Attempt to move over one element until we're successful.
|
||||
// Move up the stack as we hit the end of each page in our stack.
|
||||
var i int
|
||||
for i = len(c.stack) - 1; i >= 0; i-- {
|
||||
elem := &c.stack[i]
|
||||
if elem.index < elem.count()-1 {
|
||||
elem.index++
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// If we've hit the root page then stop and return. This will leave the
|
||||
// cursor on the last element of the last page.
|
||||
if i == -1 {
|
||||
return nil, nil, 0
|
||||
}
|
||||
|
||||
// Otherwise start from where we left off in the stack and find the
|
||||
// first element of the first leaf page.
|
||||
c.stack = c.stack[:i+1]
|
||||
c.first()
|
||||
|
||||
// If this is an empty page then restart and move back up the stack.
|
||||
// https://github.com/boltdb/bolt/issues/450
|
||||
if c.stack[len(c.stack)-1].count() == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
return c.keyValue()
|
||||
}
|
||||
}
|
||||
|
||||
// search recursively performs a binary search against a given page/node until it finds a given key.
|
||||
func (c *Cursor) search(key []byte, pgid pgid) {
|
||||
p, n := c.bucket.pageNode(pgid)
|
||||
if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
|
||||
panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
|
||||
}
|
||||
e := elemRef{page: p, node: n}
|
||||
c.stack = append(c.stack, e)
|
||||
|
||||
// If we're on a leaf page/node then find the specific node.
|
||||
if e.isLeaf() {
|
||||
c.nsearch(key)
|
||||
return
|
||||
}
|
||||
|
||||
if n != nil {
|
||||
c.searchNode(key, n)
|
||||
return
|
||||
}
|
||||
c.searchPage(key, p)
|
||||
}
|
||||
|
||||
func (c *Cursor) searchNode(key []byte, n *node) {
|
||||
var exact bool
|
||||
index := sort.Search(len(n.inodes), func(i int) bool {
|
||||
// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
|
||||
// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
|
||||
ret := bytes.Compare(n.inodes[i].key, key)
|
||||
if ret == 0 {
|
||||
exact = true
|
||||
}
|
||||
return ret != -1
|
||||
})
|
||||
if !exact && index > 0 {
|
||||
index--
|
||||
}
|
||||
c.stack[len(c.stack)-1].index = index
|
||||
|
||||
// Recursively search to the next page.
|
||||
c.search(key, n.inodes[index].pgid)
|
||||
}
|
||||
|
||||
func (c *Cursor) searchPage(key []byte, p *page) {
|
||||
// Binary search for the correct range.
|
||||
inodes := p.branchPageElements()
|
||||
|
||||
var exact bool
|
||||
index := sort.Search(int(p.count), func(i int) bool {
|
||||
// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
|
||||
// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
|
||||
ret := bytes.Compare(inodes[i].key(), key)
|
||||
if ret == 0 {
|
||||
exact = true
|
||||
}
|
||||
return ret != -1
|
||||
})
|
||||
if !exact && index > 0 {
|
||||
index--
|
||||
}
|
||||
c.stack[len(c.stack)-1].index = index
|
||||
|
||||
// Recursively search to the next page.
|
||||
c.search(key, inodes[index].pgid)
|
||||
}
|
||||
|
||||
// nsearch searches the leaf node on the top of the stack for a key.
|
||||
func (c *Cursor) nsearch(key []byte) {
|
||||
e := &c.stack[len(c.stack)-1]
|
||||
p, n := e.page, e.node
|
||||
|
||||
// If we have a node then search its inodes.
|
||||
if n != nil {
|
||||
index := sort.Search(len(n.inodes), func(i int) bool {
|
||||
return bytes.Compare(n.inodes[i].key, key) != -1
|
||||
})
|
||||
e.index = index
|
||||
return
|
||||
}
|
||||
|
||||
// If we have a page then search its leaf elements.
|
||||
inodes := p.leafPageElements()
|
||||
index := sort.Search(int(p.count), func(i int) bool {
|
||||
return bytes.Compare(inodes[i].key(), key) != -1
|
||||
})
|
||||
e.index = index
|
||||
}
|
||||
|
||||
// keyValue returns the key and value of the current leaf element.
|
||||
func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
|
||||
ref := &c.stack[len(c.stack)-1]
|
||||
if ref.count() == 0 || ref.index >= ref.count() {
|
||||
return nil, nil, 0
|
||||
}
|
||||
|
||||
// Retrieve value from node.
|
||||
if ref.node != nil {
|
||||
inode := &ref.node.inodes[ref.index]
|
||||
return inode.key, inode.value, inode.flags
|
||||
}
|
||||
|
||||
// Or retrieve value from page.
|
||||
elem := ref.page.leafPageElement(uint16(ref.index))
|
||||
return elem.key(), elem.value(), elem.flags
|
||||
}
|
||||
|
||||
// node returns the node that the cursor is currently positioned on.
|
||||
func (c *Cursor) node() *node {
|
||||
_assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")
|
||||
|
||||
// If the top of the stack is a leaf node then just return it.
|
||||
if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
|
||||
return ref.node
|
||||
}
|
||||
|
||||
// Start from root and traverse down the hierarchy.
|
||||
var n = c.stack[0].node
|
||||
if n == nil {
|
||||
n = c.bucket.node(c.stack[0].page.id, nil)
|
||||
}
|
||||
for _, ref := range c.stack[:len(c.stack)-1] {
|
||||
_assert(!n.isLeaf, "expected branch node")
|
||||
n = n.childAt(int(ref.index))
|
||||
}
|
||||
_assert(n.isLeaf, "expected leaf node")
|
||||
return n
|
||||
}
|
||||
|
||||
// elemRef represents a reference to an element on a given page/node.
|
||||
type elemRef struct {
|
||||
page *page
|
||||
node *node
|
||||
index int
|
||||
}
|
||||
|
||||
// isLeaf returns whether the ref is pointing at a leaf page/node.
|
||||
func (r *elemRef) isLeaf() bool {
|
||||
if r.node != nil {
|
||||
return r.node.isLeaf
|
||||
}
|
||||
return (r.page.flags & leafPageFlag) != 0
|
||||
}
|
||||
|
||||
// count returns the number of inodes or page elements.
|
||||
func (r *elemRef) count() int {
|
||||
if r.node != nil {
|
||||
return len(r.node.inodes)
|
||||
}
|
||||
return int(r.page.count)
|
||||
}
|
||||
1037
vendor/github.com/boltdb/bolt/db.go
generated
vendored
Normal file
File diff suppressed because it is too large
44
vendor/github.com/boltdb/bolt/doc.go
generated
vendored
Normal file
@@ -0,0 +1,44 @@
/*
Package bolt implements a low-level key/value store in pure Go. It supports
fully serializable transactions, ACID semantics, and lock-free MVCC with
multiple readers and a single writer. Bolt can be used for projects that
want a simple data store without the need to add large dependencies such as
Postgres or MySQL.

Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
optimized for fast read access and does not require recovery in the event of a
system crash. Transactions which have not finished committing will simply be
rolled back in the event of a crash.

The design of Bolt is based on Howard Chu's LMDB database project.

Bolt currently works on Windows, Mac OS X, and Linux.


Basics

There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
a collection of buckets and is represented by a single file on disk. A bucket is
a collection of unique keys that are associated with values.

Transactions provide either read-only or read-write access to the database.
Read-only transactions can retrieve key/value pairs and can use Cursors to
iterate over the dataset sequentially. Read-write transactions can create and
delete buckets and can insert and remove keys. Only one read-write transaction
is allowed at a time.


Caveats

The database uses a read-only, memory-mapped data file to ensure that
applications cannot corrupt the database, however, this means that keys and
values returned from Bolt cannot be changed. Writing to a read-only byte slice
will cause Go to panic.

Keys and values retrieved from the database are only valid for the life of
the transaction. When used outside the transaction, these byte slices can
point to different data or can point to invalid memory which will cause a panic.


*/
package bolt
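The package documentation above outlines the DB, Bucket, Tx, and Cursor types. As a brief editorial illustration of that flow (not part of the diff itself; the file name my.db and the bucket name widgets are arbitrary choices for the sketch), a minimal program using the public API might look like:

package main

import (
	"fmt"
	"log"

	bolt "github.com/boltdb/bolt"
)

func main() {
	// Open (or create) the single database file.
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Read-write transaction: create a bucket and store one key.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("answer"), []byte("42"))
	}); err != nil {
		log.Fatal(err)
	}

	// Read-only transaction: the returned slice is only valid inside View.
	if err := db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket([]byte("widgets")).Get([]byte("answer"))
		fmt.Printf("answer = %s\n", v)
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}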
71
vendor/github.com/boltdb/bolt/errors.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
package bolt

import "errors"

// These errors can be returned when opening or calling methods on a DB.
var (
	// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
	// is opened or after it is closed.
	ErrDatabaseNotOpen = errors.New("database not open")

	// ErrDatabaseOpen is returned when opening a database that is
	// already open.
	ErrDatabaseOpen = errors.New("database already open")

	// ErrInvalid is returned when both meta pages on a database are invalid.
	// This typically occurs when a file is not a bolt database.
	ErrInvalid = errors.New("invalid database")

	// ErrVersionMismatch is returned when the data file was created with a
	// different version of Bolt.
	ErrVersionMismatch = errors.New("version mismatch")

	// ErrChecksum is returned when either meta page checksum does not match.
	ErrChecksum = errors.New("checksum error")

	// ErrTimeout is returned when a database cannot obtain an exclusive lock
	// on the data file after the timeout passed to Open().
	ErrTimeout = errors.New("timeout")
)

// These errors can occur when beginning or committing a Tx.
var (
	// ErrTxNotWritable is returned when performing a write operation on a
	// read-only transaction.
	ErrTxNotWritable = errors.New("tx not writable")

	// ErrTxClosed is returned when committing or rolling back a transaction
	// that has already been committed or rolled back.
	ErrTxClosed = errors.New("tx closed")

	// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
	// read-only database.
	ErrDatabaseReadOnly = errors.New("database is in read-only mode")
)

// These errors can occur when putting or deleting a value or a bucket.
var (
	// ErrBucketNotFound is returned when trying to access a bucket that has
	// not been created yet.
	ErrBucketNotFound = errors.New("bucket not found")

	// ErrBucketExists is returned when creating a bucket that already exists.
	ErrBucketExists = errors.New("bucket already exists")

	// ErrBucketNameRequired is returned when creating a bucket with a blank name.
	ErrBucketNameRequired = errors.New("bucket name required")

	// ErrKeyRequired is returned when inserting a zero-length key.
	ErrKeyRequired = errors.New("key required")

	// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
	ErrKeyTooLarge = errors.New("key too large")

	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
	ErrValueTooLarge = errors.New("value too large")

	// ErrIncompatibleValue is returned when trying to create or delete a bucket
	// on an existing non-bucket key or when trying to create or delete a
	// non-bucket key on an existing bucket key.
	ErrIncompatibleValue = errors.New("incompatible value")
)
252
vendor/github.com/boltdb/bolt/freelist.go
generated
vendored
Normal file
@@ -0,0 +1,252 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// freelist represents a list of all pages that are available for allocation.
|
||||
// It also tracks pages that have been freed but are still in use by open transactions.
|
||||
type freelist struct {
|
||||
ids []pgid // all free and available free page ids.
|
||||
pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
|
||||
cache map[pgid]bool // fast lookup of all free and pending page ids.
|
||||
}
|
||||
|
||||
// newFreelist returns an empty, initialized freelist.
|
||||
func newFreelist() *freelist {
|
||||
return &freelist{
|
||||
pending: make(map[txid][]pgid),
|
||||
cache: make(map[pgid]bool),
|
||||
}
|
||||
}
|
||||
|
||||
// size returns the size of the page after serialization.
|
||||
func (f *freelist) size() int {
|
||||
n := f.count()
|
||||
if n >= 0xFFFF {
|
||||
// The first element will be used to store the count. See freelist.write.
|
||||
n++
|
||||
}
|
||||
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
|
||||
}
|
||||
|
||||
// count returns count of pages on the freelist
|
||||
func (f *freelist) count() int {
|
||||
return f.free_count() + f.pending_count()
|
||||
}
|
||||
|
||||
// free_count returns count of free pages
|
||||
func (f *freelist) free_count() int {
|
||||
return len(f.ids)
|
||||
}
|
||||
|
||||
// pending_count returns count of pending pages
|
||||
func (f *freelist) pending_count() int {
|
||||
var count int
|
||||
for _, list := range f.pending {
|
||||
count += len(list)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
|
||||
// f.count returns the minimum length required for dst.
|
||||
func (f *freelist) copyall(dst []pgid) {
|
||||
m := make(pgids, 0, f.pending_count())
|
||||
for _, list := range f.pending {
|
||||
m = append(m, list...)
|
||||
}
|
||||
sort.Sort(m)
|
||||
mergepgids(dst, f.ids, m)
|
||||
}
|
||||
|
||||
// allocate returns the starting page id of a contiguous list of pages of a given size.
|
||||
// If a contiguous block cannot be found then 0 is returned.
|
||||
func (f *freelist) allocate(n int) pgid {
|
||||
if len(f.ids) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
var initial, previd pgid
|
||||
for i, id := range f.ids {
|
||||
if id <= 1 {
|
||||
panic(fmt.Sprintf("invalid page allocation: %d", id))
|
||||
}
|
||||
|
||||
// Reset initial page if this is not contiguous.
|
||||
if previd == 0 || id-previd != 1 {
|
||||
initial = id
|
||||
}
|
||||
|
||||
// If we found a contiguous block then remove it and return it.
|
||||
if (id-initial)+1 == pgid(n) {
|
||||
// If we're allocating off the beginning then take the fast path
|
||||
// and just adjust the existing slice. This will use extra memory
|
||||
// temporarily but the append() in free() will realloc the slice
|
||||
// as is necessary.
|
||||
if (i + 1) == n {
|
||||
f.ids = f.ids[i+1:]
|
||||
} else {
|
||||
copy(f.ids[i-n+1:], f.ids[i+1:])
|
||||
f.ids = f.ids[:len(f.ids)-n]
|
||||
}
|
||||
|
||||
// Remove from the free cache.
|
||||
for i := pgid(0); i < pgid(n); i++ {
|
||||
delete(f.cache, initial+i)
|
||||
}
|
||||
|
||||
return initial
|
||||
}
|
||||
|
||||
previd = id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
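The allocate loop above scans the sorted list of free page ids for a run of n consecutive ids. As a standalone editorial sketch of just that scan (not part of the vendored freelist.go, and omitting the slice trimming and cache bookkeeping the real method performs), the core idea looks like this:

// firstContiguousRun returns the first id of a run of n consecutive ids in a
// sorted slice, or 0 if no such run exists. Ids 0 and 1 are assumed unused,
// mirroring the page id convention above. Editorial sketch only.
func firstContiguousRun(ids []uint64, n int) uint64 {
	var initial, prev uint64
	for _, id := range ids {
		// Restart the run whenever the sequence breaks (or on the first element).
		if prev == 0 || id-prev != 1 {
			initial = id
		}
		// Found n consecutive ids starting at initial.
		if id-initial+1 == uint64(n) {
			return initial
		}
		prev = id
	}
	return 0
}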
// free releases a page and its overflow for a given transaction id.
|
||||
// If the page is already free then a panic will occur.
|
||||
func (f *freelist) free(txid txid, p *page) {
|
||||
if p.id <= 1 {
|
||||
panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
|
||||
}
|
||||
|
||||
// Free page and all its overflow pages.
|
||||
var ids = f.pending[txid]
|
||||
for id := p.id; id <= p.id+pgid(p.overflow); id++ {
|
||||
// Verify that page is not already free.
|
||||
if f.cache[id] {
|
||||
panic(fmt.Sprintf("page %d already freed", id))
|
||||
}
|
||||
|
||||
// Add to the freelist and cache.
|
||||
ids = append(ids, id)
|
||||
f.cache[id] = true
|
||||
}
|
||||
f.pending[txid] = ids
|
||||
}
|
||||
|
||||
// release moves all page ids for a transaction id (or older) to the freelist.
|
||||
func (f *freelist) release(txid txid) {
|
||||
m := make(pgids, 0)
|
||||
for tid, ids := range f.pending {
|
||||
if tid <= txid {
|
||||
// Move transaction's pending pages to the available freelist.
|
||||
// Don't remove from the cache since the page is still free.
|
||||
m = append(m, ids...)
|
||||
delete(f.pending, tid)
|
||||
}
|
||||
}
|
||||
sort.Sort(m)
|
||||
f.ids = pgids(f.ids).merge(m)
|
||||
}
|
||||
|
||||
// rollback removes the pages from a given pending tx.
|
||||
func (f *freelist) rollback(txid txid) {
|
||||
// Remove page ids from cache.
|
||||
for _, id := range f.pending[txid] {
|
||||
delete(f.cache, id)
|
||||
}
|
||||
|
||||
// Remove pages from pending list.
|
||||
delete(f.pending, txid)
|
||||
}
|
||||
|
||||
// freed returns whether a given page is in the free list.
|
||||
func (f *freelist) freed(pgid pgid) bool {
|
||||
return f.cache[pgid]
|
||||
}
|
||||
|
||||
// read initializes the freelist from a freelist page.
|
||||
func (f *freelist) read(p *page) {
|
||||
// If the page.count is at the max uint16 value (64k) then it's considered
|
||||
// an overflow and the size of the freelist is stored as the first element.
|
||||
idx, count := 0, int(p.count)
|
||||
if count == 0xFFFF {
|
||||
idx = 1
|
||||
count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
|
||||
}
|
||||
|
||||
// Copy the list of page ids from the freelist.
|
||||
if count == 0 {
|
||||
f.ids = nil
|
||||
} else {
|
||||
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
|
||||
f.ids = make([]pgid, len(ids))
|
||||
copy(f.ids, ids)
|
||||
|
||||
// Make sure they're sorted.
|
||||
sort.Sort(pgids(f.ids))
|
||||
}
|
||||
|
||||
// Rebuild the page cache.
|
||||
f.reindex()
|
||||
}
|
||||
|
||||
// write writes the page ids onto a freelist page. All free and pending ids are
|
||||
// saved to disk since in the event of a program crash, all pending ids will
|
||||
// become free.
|
||||
func (f *freelist) write(p *page) error {
|
||||
// Combine the old free pgids and pgids waiting on an open transaction.
|
||||
|
||||
// Update the header flag.
|
||||
p.flags |= freelistPageFlag
|
||||
|
||||
// The page.count can only hold up to 64k elements so if we overflow that
|
||||
// number then we handle it by putting the size in the first element.
|
||||
lenids := f.count()
|
||||
if lenids == 0 {
|
||||
p.count = uint16(lenids)
|
||||
} else if lenids < 0xFFFF {
|
||||
p.count = uint16(lenids)
|
||||
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
|
||||
} else {
|
||||
p.count = 0xFFFF
|
||||
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
|
||||
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// reload reads the freelist from a page and filters out pending items.
|
||||
func (f *freelist) reload(p *page) {
|
||||
f.read(p)
|
||||
|
||||
// Build a cache of only pending pages.
|
||||
pcache := make(map[pgid]bool)
|
||||
for _, pendingIDs := range f.pending {
|
||||
for _, pendingID := range pendingIDs {
|
||||
pcache[pendingID] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Check each page in the freelist and build a new available freelist
|
||||
// with any pages not in the pending lists.
|
||||
var a []pgid
|
||||
for _, id := range f.ids {
|
||||
if !pcache[id] {
|
||||
a = append(a, id)
|
||||
}
|
||||
}
|
||||
f.ids = a
|
||||
|
||||
// Once the available list is rebuilt then rebuild the free cache so that
|
||||
// it includes the available and pending free pages.
|
||||
f.reindex()
|
||||
}
|
||||
|
||||
// reindex rebuilds the free cache based on available and pending free lists.
|
||||
func (f *freelist) reindex() {
|
||||
f.cache = make(map[pgid]bool, len(f.ids))
|
||||
for _, id := range f.ids {
|
||||
f.cache[id] = true
|
||||
}
|
||||
for _, pendingIDs := range f.pending {
|
||||
for _, pendingID := range pendingIDs {
|
||||
f.cache[pendingID] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
604
vendor/github.com/boltdb/bolt/node.go
generated
vendored
Normal file
@@ -0,0 +1,604 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"sort"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// node represents an in-memory, deserialized page.
|
||||
type node struct {
|
||||
bucket *Bucket
|
||||
isLeaf bool
|
||||
unbalanced bool
|
||||
spilled bool
|
||||
key []byte
|
||||
pgid pgid
|
||||
parent *node
|
||||
children nodes
|
||||
inodes inodes
|
||||
}
|
||||
|
||||
// root returns the top-level node this node is attached to.
|
||||
func (n *node) root() *node {
|
||||
if n.parent == nil {
|
||||
return n
|
||||
}
|
||||
return n.parent.root()
|
||||
}
|
||||
|
||||
// minKeys returns the minimum number of inodes this node should have.
|
||||
func (n *node) minKeys() int {
|
||||
if n.isLeaf {
|
||||
return 1
|
||||
}
|
||||
return 2
|
||||
}
|
||||
|
||||
// size returns the size of the node after serialization.
|
||||
func (n *node) size() int {
|
||||
sz, elsz := pageHeaderSize, n.pageElementSize()
|
||||
for i := 0; i < len(n.inodes); i++ {
|
||||
item := &n.inodes[i]
|
||||
sz += elsz + len(item.key) + len(item.value)
|
||||
}
|
||||
return sz
|
||||
}
|
||||
|
||||
// sizeLessThan returns true if the node is less than a given size.
|
||||
// This is an optimization to avoid calculating a large node when we only need
|
||||
// to know if it fits inside a certain page size.
|
||||
func (n *node) sizeLessThan(v int) bool {
|
||||
sz, elsz := pageHeaderSize, n.pageElementSize()
|
||||
for i := 0; i < len(n.inodes); i++ {
|
||||
item := &n.inodes[i]
|
||||
sz += elsz + len(item.key) + len(item.value)
|
||||
if sz >= v {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// pageElementSize returns the size of each page element based on the type of node.
|
||||
func (n *node) pageElementSize() int {
|
||||
if n.isLeaf {
|
||||
return leafPageElementSize
|
||||
}
|
||||
return branchPageElementSize
|
||||
}
|
||||
|
||||
// childAt returns the child node at a given index.
|
||||
func (n *node) childAt(index int) *node {
|
||||
if n.isLeaf {
|
||||
panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
|
||||
}
|
||||
return n.bucket.node(n.inodes[index].pgid, n)
|
||||
}
|
||||
|
||||
// childIndex returns the index of a given child node.
|
||||
func (n *node) childIndex(child *node) int {
|
||||
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
|
||||
return index
|
||||
}
|
||||
|
||||
// numChildren returns the number of children.
|
||||
func (n *node) numChildren() int {
|
||||
return len(n.inodes)
|
||||
}
|
||||
|
||||
// nextSibling returns the next node with the same parent.
|
||||
func (n *node) nextSibling() *node {
|
||||
if n.parent == nil {
|
||||
return nil
|
||||
}
|
||||
index := n.parent.childIndex(n)
|
||||
if index >= n.parent.numChildren()-1 {
|
||||
return nil
|
||||
}
|
||||
return n.parent.childAt(index + 1)
|
||||
}
|
||||
|
||||
// prevSibling returns the previous node with the same parent.
|
||||
func (n *node) prevSibling() *node {
|
||||
if n.parent == nil {
|
||||
return nil
|
||||
}
|
||||
index := n.parent.childIndex(n)
|
||||
if index == 0 {
|
||||
return nil
|
||||
}
|
||||
return n.parent.childAt(index - 1)
|
||||
}
|
||||
|
||||
// put inserts a key/value.
|
||||
func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
|
||||
if pgid >= n.bucket.tx.meta.pgid {
|
||||
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
|
||||
} else if len(oldKey) <= 0 {
|
||||
panic("put: zero-length old key")
|
||||
} else if len(newKey) <= 0 {
|
||||
panic("put: zero-length new key")
|
||||
}
|
||||
|
||||
// Find insertion index.
|
||||
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
|
||||
|
||||
// Add capacity and shift nodes if we don't have an exact match and need to insert.
|
||||
exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
|
||||
if !exact {
|
||||
n.inodes = append(n.inodes, inode{})
|
||||
copy(n.inodes[index+1:], n.inodes[index:])
|
||||
}
|
||||
|
||||
inode := &n.inodes[index]
|
||||
inode.flags = flags
|
||||
inode.key = newKey
|
||||
inode.value = value
|
||||
inode.pgid = pgid
|
||||
_assert(len(inode.key) > 0, "put: zero-length inode key")
|
||||
}
|
||||
|
||||
// del removes a key from the node.
|
||||
func (n *node) del(key []byte) {
|
||||
// Find index of key.
|
||||
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })
|
||||
|
||||
// Exit if the key isn't found.
|
||||
if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
|
||||
return
|
||||
}
|
||||
|
||||
// Delete inode from the node.
|
||||
n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)
|
||||
|
||||
// Mark the node as needing rebalancing.
|
||||
n.unbalanced = true
|
||||
}
|
||||
|
||||
// read initializes the node from a page.
|
||||
func (n *node) read(p *page) {
|
||||
n.pgid = p.id
|
||||
n.isLeaf = ((p.flags & leafPageFlag) != 0)
|
||||
n.inodes = make(inodes, int(p.count))
|
||||
|
||||
for i := 0; i < int(p.count); i++ {
|
||||
inode := &n.inodes[i]
|
||||
if n.isLeaf {
|
||||
elem := p.leafPageElement(uint16(i))
|
||||
inode.flags = elem.flags
|
||||
inode.key = elem.key()
|
||||
inode.value = elem.value()
|
||||
} else {
|
||||
elem := p.branchPageElement(uint16(i))
|
||||
inode.pgid = elem.pgid
|
||||
inode.key = elem.key()
|
||||
}
|
||||
_assert(len(inode.key) > 0, "read: zero-length inode key")
|
||||
}
|
||||
|
||||
// Save first key so we can find the node in the parent when we spill.
|
||||
if len(n.inodes) > 0 {
|
||||
n.key = n.inodes[0].key
|
||||
_assert(len(n.key) > 0, "read: zero-length node key")
|
||||
} else {
|
||||
n.key = nil
|
||||
}
|
||||
}
|
||||
|
||||
// write writes the items onto one or more pages.
|
||||
func (n *node) write(p *page) {
|
||||
// Initialize page.
|
||||
if n.isLeaf {
|
||||
p.flags |= leafPageFlag
|
||||
} else {
|
||||
p.flags |= branchPageFlag
|
||||
}
|
||||
|
||||
if len(n.inodes) >= 0xFFFF {
|
||||
panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
|
||||
}
|
||||
p.count = uint16(len(n.inodes))
|
||||
|
||||
// Stop here if there are no items to write.
|
||||
if p.count == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Loop over each item and write it to the page.
|
||||
b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
|
||||
for i, item := range n.inodes {
|
||||
_assert(len(item.key) > 0, "write: zero-length inode key")
|
||||
|
||||
// Write the page element.
|
||||
if n.isLeaf {
|
||||
elem := p.leafPageElement(uint16(i))
|
||||
elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
|
||||
elem.flags = item.flags
|
||||
elem.ksize = uint32(len(item.key))
|
||||
elem.vsize = uint32(len(item.value))
|
||||
} else {
|
||||
elem := p.branchPageElement(uint16(i))
|
||||
elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
|
||||
elem.ksize = uint32(len(item.key))
|
||||
elem.pgid = item.pgid
|
||||
_assert(elem.pgid != p.id, "write: circular dependency occurred")
|
||||
}
|
||||
|
||||
// If the length of key+value is larger than the max allocation size
|
||||
// then we need to reallocate the byte array pointer.
|
||||
//
|
||||
// See: https://github.com/boltdb/bolt/pull/335
|
||||
klen, vlen := len(item.key), len(item.value)
|
||||
if len(b) < klen+vlen {
|
||||
b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
|
||||
}
|
||||
|
||||
// Write data for the element to the end of the page.
|
||||
copy(b[0:], item.key)
|
||||
b = b[klen:]
|
||||
copy(b[0:], item.value)
|
||||
b = b[vlen:]
|
||||
}
|
||||
|
||||
// DEBUG ONLY: n.dump()
|
||||
}
|
||||
|
||||
// split breaks up a node into multiple smaller nodes, if appropriate.
|
||||
// This should only be called from the spill() function.
|
||||
func (n *node) split(pageSize int) []*node {
|
||||
var nodes []*node
|
||||
|
||||
node := n
|
||||
for {
|
||||
// Split node into two.
|
||||
a, b := node.splitTwo(pageSize)
|
||||
nodes = append(nodes, a)
|
||||
|
||||
// If we can't split then exit the loop.
|
||||
if b == nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Set node to b so it gets split on the next iteration.
|
||||
node = b
|
||||
}
|
||||
|
||||
return nodes
|
||||
}
|
||||
|
||||
// splitTwo breaks up a node into two smaller nodes, if appropriate.
|
||||
// This should only be called from the split() function.
|
||||
func (n *node) splitTwo(pageSize int) (*node, *node) {
|
||||
// Ignore the split if the page doesn't have at least enough nodes for
|
||||
// two pages or if the nodes can fit in a single page.
|
||||
if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// Determine the threshold before starting a new node.
|
||||
var fillPercent = n.bucket.FillPercent
|
||||
if fillPercent < minFillPercent {
|
||||
fillPercent = minFillPercent
|
||||
} else if fillPercent > maxFillPercent {
|
||||
fillPercent = maxFillPercent
|
||||
}
|
||||
threshold := int(float64(pageSize) * fillPercent)
|
||||
|
||||
// Determine split position and sizes of the two pages.
|
||||
splitIndex, _ := n.splitIndex(threshold)
|
||||
|
||||
// Split node into two separate nodes.
|
||||
// If there's no parent then we'll need to create one.
|
||||
if n.parent == nil {
|
||||
n.parent = &node{bucket: n.bucket, children: []*node{n}}
|
||||
}
|
||||
|
||||
// Create a new node and add it to the parent.
|
||||
next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
|
||||
n.parent.children = append(n.parent.children, next)
|
||||
|
||||
// Split inodes across two nodes.
|
||||
next.inodes = n.inodes[splitIndex:]
|
||||
n.inodes = n.inodes[:splitIndex]
|
||||
|
||||
// Update the statistics.
|
||||
n.bucket.tx.stats.Split++
|
||||
|
||||
return n, next
|
||||
}
|
||||
|
||||
// splitIndex finds the position where a page will fill a given threshold.
|
||||
// It returns the index as well as the size of the first page.
|
||||
// This should only be called from split().
|
||||
func (n *node) splitIndex(threshold int) (index, sz int) {
|
||||
sz = pageHeaderSize
|
||||
|
||||
// Loop until we only have the minimum number of keys required for the second page.
|
||||
for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
|
||||
index = i
|
||||
inode := n.inodes[i]
|
||||
elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
|
||||
|
||||
// If we have at least the minimum number of keys and adding another
|
||||
// node would put us over the threshold then exit and return.
|
||||
if i >= minKeysPerPage && sz+elsize > threshold {
|
||||
break
|
||||
}
|
||||
|
||||
// Add the element size to the total size.
|
||||
sz += elsize
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// spill writes the nodes to dirty pages and splits nodes as it goes.
|
||||
// Returns an error if dirty pages cannot be allocated.
|
||||
func (n *node) spill() error {
|
||||
var tx = n.bucket.tx
|
||||
if n.spilled {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Spill child nodes first. Child nodes can materialize sibling nodes in
|
||||
// the case of split-merge so we cannot use a range loop. We have to check
|
||||
// the children size on every loop iteration.
|
||||
sort.Sort(n.children)
|
||||
for i := 0; i < len(n.children); i++ {
|
||||
if err := n.children[i].spill(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// We no longer need the child list because it's only used for spill tracking.
|
||||
n.children = nil
|
||||
|
||||
// Split nodes into appropriate sizes. The first node will always be n.
|
||||
var nodes = n.split(tx.db.pageSize)
|
||||
for _, node := range nodes {
|
||||
// Add node's page to the freelist if it's not new.
|
||||
if node.pgid > 0 {
|
||||
tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
|
||||
node.pgid = 0
|
||||
}
|
||||
|
||||
// Allocate contiguous space for the node.
|
||||
p, err := tx.allocate((node.size() / tx.db.pageSize) + 1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Write the node.
|
||||
if p.id >= tx.meta.pgid {
|
||||
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
|
||||
}
|
||||
node.pgid = p.id
|
||||
node.write(p)
|
||||
node.spilled = true
|
||||
|
||||
// Insert into parent inodes.
|
||||
if node.parent != nil {
|
||||
var key = node.key
|
||||
if key == nil {
|
||||
key = node.inodes[0].key
|
||||
}
|
||||
|
||||
node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
|
||||
node.key = node.inodes[0].key
|
||||
_assert(len(node.key) > 0, "spill: zero-length node key")
|
||||
}
|
||||
|
||||
// Update the statistics.
|
||||
tx.stats.Spill++
|
||||
}
|
||||
|
||||
// If the root node split and created a new root then we need to spill that
|
||||
// as well. We'll clear out the children to make sure it doesn't try to respill.
|
||||
if n.parent != nil && n.parent.pgid == 0 {
|
||||
n.children = nil
|
||||
return n.parent.spill()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// rebalance attempts to combine the node with sibling nodes if the node fill
|
||||
// size is below a threshold or if there are not enough keys.
|
||||
func (n *node) rebalance() {
|
||||
if !n.unbalanced {
|
||||
return
|
||||
}
|
||||
n.unbalanced = false
|
||||
|
||||
// Update statistics.
|
||||
n.bucket.tx.stats.Rebalance++
|
||||
|
||||
// Ignore if node is above threshold (25%) and has enough keys.
|
||||
var threshold = n.bucket.tx.db.pageSize / 4
|
||||
if n.size() > threshold && len(n.inodes) > n.minKeys() {
|
||||
return
|
||||
}
|
||||
|
||||
// Root node has special handling.
|
||||
if n.parent == nil {
|
||||
// If root node is a branch and only has one node then collapse it.
|
||||
if !n.isLeaf && len(n.inodes) == 1 {
|
||||
// Move root's child up.
|
||||
child := n.bucket.node(n.inodes[0].pgid, n)
|
||||
n.isLeaf = child.isLeaf
|
||||
n.inodes = child.inodes[:]
|
||||
n.children = child.children
|
||||
|
||||
// Reparent all child nodes being moved.
|
||||
for _, inode := range n.inodes {
|
||||
if child, ok := n.bucket.nodes[inode.pgid]; ok {
|
||||
child.parent = n
|
||||
}
|
||||
}
|
||||
|
||||
// Remove old child.
|
||||
child.parent = nil
|
||||
delete(n.bucket.nodes, child.pgid)
|
||||
child.free()
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// If node has no keys then just remove it.
|
||||
if n.numChildren() == 0 {
|
||||
n.parent.del(n.key)
|
||||
n.parent.removeChild(n)
|
||||
delete(n.bucket.nodes, n.pgid)
|
||||
n.free()
|
||||
n.parent.rebalance()
|
||||
return
|
||||
}
|
||||
|
||||
_assert(n.parent.numChildren() > 1, "parent must have at least 2 children")
|
||||
|
||||
// Destination node is right sibling if idx == 0, otherwise left sibling.
|
||||
var target *node
|
||||
var useNextSibling = (n.parent.childIndex(n) == 0)
|
||||
if useNextSibling {
|
||||
target = n.nextSibling()
|
||||
} else {
|
||||
target = n.prevSibling()
|
||||
}
|
||||
|
||||
// If both this node and the target node are too small then merge them.
|
||||
if useNextSibling {
|
||||
// Reparent all child nodes being moved.
|
||||
for _, inode := range target.inodes {
|
||||
if child, ok := n.bucket.nodes[inode.pgid]; ok {
|
||||
child.parent.removeChild(child)
|
||||
child.parent = n
|
||||
child.parent.children = append(child.parent.children, child)
|
||||
}
|
||||
}
|
||||
|
||||
// Copy over inodes from target and remove target.
|
||||
n.inodes = append(n.inodes, target.inodes...)
|
||||
n.parent.del(target.key)
|
||||
n.parent.removeChild(target)
|
||||
delete(n.bucket.nodes, target.pgid)
|
||||
target.free()
|
||||
} else {
|
||||
// Reparent all child nodes being moved.
|
||||
for _, inode := range n.inodes {
|
||||
if child, ok := n.bucket.nodes[inode.pgid]; ok {
|
||||
child.parent.removeChild(child)
|
||||
child.parent = target
|
||||
child.parent.children = append(child.parent.children, child)
|
||||
}
|
||||
}
|
||||
|
||||
// Copy over inodes to target and remove node.
|
||||
target.inodes = append(target.inodes, n.inodes...)
|
||||
n.parent.del(n.key)
|
||||
n.parent.removeChild(n)
|
||||
delete(n.bucket.nodes, n.pgid)
|
||||
n.free()
|
||||
}
|
||||
|
||||
// Either this node or the target node was deleted from the parent so rebalance it.
|
||||
n.parent.rebalance()
|
||||
}
|
||||
|
||||
// removes a node from the list of in-memory children.
|
||||
// This does not affect the inodes.
|
||||
func (n *node) removeChild(target *node) {
|
||||
for i, child := range n.children {
|
||||
if child == target {
|
||||
n.children = append(n.children[:i], n.children[i+1:]...)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// dereference causes the node to copy all its inode key/value references to heap memory.
|
||||
// This is required when the mmap is reallocated so inodes are not pointing to stale data.
|
||||
func (n *node) dereference() {
|
||||
if n.key != nil {
|
||||
key := make([]byte, len(n.key))
|
||||
copy(key, n.key)
|
||||
n.key = key
|
||||
_assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
|
||||
}
|
||||
|
||||
for i := range n.inodes {
|
||||
inode := &n.inodes[i]
|
||||
|
||||
key := make([]byte, len(inode.key))
|
||||
copy(key, inode.key)
|
||||
inode.key = key
|
||||
_assert(len(inode.key) > 0, "dereference: zero-length inode key")
|
||||
|
||||
value := make([]byte, len(inode.value))
|
||||
copy(value, inode.value)
|
||||
inode.value = value
|
||||
}
|
||||
|
||||
// Recursively dereference children.
|
||||
for _, child := range n.children {
|
||||
child.dereference()
|
||||
}
|
||||
|
||||
// Update statistics.
|
||||
n.bucket.tx.stats.NodeDeref++
|
||||
}
|
||||
|
||||
// free adds the node's underlying page to the freelist.
|
||||
func (n *node) free() {
|
||||
if n.pgid != 0 {
|
||||
n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
|
||||
n.pgid = 0
|
||||
}
|
||||
}
|
||||
|
||||
// dump writes the contents of the node to STDERR for debugging purposes.
|
||||
/*
|
||||
func (n *node) dump() {
|
||||
// Write node header.
|
||||
var typ = "branch"
|
||||
if n.isLeaf {
|
||||
typ = "leaf"
|
||||
}
|
||||
warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))
|
||||
|
||||
// Write out abbreviated version of each item.
|
||||
for _, item := range n.inodes {
|
||||
if n.isLeaf {
|
||||
if item.flags&bucketLeafFlag != 0 {
|
||||
bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
|
||||
warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
|
||||
} else {
|
||||
warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
|
||||
}
|
||||
} else {
|
||||
warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
|
||||
}
|
||||
}
|
||||
warn("")
|
||||
}
|
||||
*/
|
||||
|
||||
type nodes []*node
|
||||
|
||||
func (s nodes) Len() int { return len(s) }
|
||||
func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }
|
||||
|
||||
// inode represents an internal node inside of a node.
|
||||
// It can be used to point to elements in a page or point
|
||||
// to an element which hasn't been added to a page yet.
|
||||
type inode struct {
|
||||
flags uint32
|
||||
pgid pgid
|
||||
key []byte
|
||||
value []byte
|
||||
}
|
||||
|
||||
type inodes []inode
|
||||
197
vendor/github.com/boltdb/bolt/page.go
generated
vendored
Normal file
@@ -0,0 +1,197 @@
package bolt

import (
    "fmt"
    "os"
    "sort"
    "unsafe"
)

const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))

const minKeysPerPage = 2

const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))

const (
    branchPageFlag   = 0x01
    leafPageFlag     = 0x02
    metaPageFlag     = 0x04
    freelistPageFlag = 0x10
)

const (
    bucketLeafFlag = 0x01
)

type pgid uint64

type page struct {
    id       pgid
    flags    uint16
    count    uint16
    overflow uint32
    ptr      uintptr
}

// typ returns a human readable page type string used for debugging.
func (p *page) typ() string {
    if (p.flags & branchPageFlag) != 0 {
        return "branch"
    } else if (p.flags & leafPageFlag) != 0 {
        return "leaf"
    } else if (p.flags & metaPageFlag) != 0 {
        return "meta"
    } else if (p.flags & freelistPageFlag) != 0 {
        return "freelist"
    }
    return fmt.Sprintf("unknown<%02x>", p.flags)
}

// meta returns a pointer to the metadata section of the page.
func (p *page) meta() *meta {
    return (*meta)(unsafe.Pointer(&p.ptr))
}

// leafPageElement retrieves the leaf node by index
func (p *page) leafPageElement(index uint16) *leafPageElement {
    n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
    return n
}

// leafPageElements retrieves a list of leaf nodes.
func (p *page) leafPageElements() []leafPageElement {
    if p.count == 0 {
        return nil
    }
    return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
}

// branchPageElement retrieves the branch node by index
func (p *page) branchPageElement(index uint16) *branchPageElement {
    return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
}

// branchPageElements retrieves a list of branch nodes.
func (p *page) branchPageElements() []branchPageElement {
    if p.count == 0 {
        return nil
    }
    return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
}

// dump writes n bytes of the page to STDERR as hex output.
func (p *page) hexdump(n int) {
    buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
    fmt.Fprintf(os.Stderr, "%x\n", buf)
}

type pages []*page

func (s pages) Len() int           { return len(s) }
func (s pages) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }

// branchPageElement represents a node on a branch page.
type branchPageElement struct {
    pos   uint32
    ksize uint32
    pgid  pgid
}

// key returns a byte slice of the node key.
func (n *branchPageElement) key() []byte {
    buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
    return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
}

// leafPageElement represents a node on a leaf page.
type leafPageElement struct {
    flags uint32
    pos   uint32
    ksize uint32
    vsize uint32
}

// key returns a byte slice of the node key.
func (n *leafPageElement) key() []byte {
    buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
    return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
}

// value returns a byte slice of the node value.
func (n *leafPageElement) value() []byte {
    buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
    return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
}

// PageInfo represents human readable information about a page.
type PageInfo struct {
    ID            int
    Type          string
    Count         int
    OverflowCount int
}

type pgids []pgid

func (s pgids) Len() int           { return len(s) }
func (s pgids) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s pgids) Less(i, j int) bool { return s[i] < s[j] }

// merge returns the sorted union of a and b.
func (a pgids) merge(b pgids) pgids {
    // Return the opposite slice if one is nil.
    if len(a) == 0 {
        return b
    }
    if len(b) == 0 {
        return a
    }
    merged := make(pgids, len(a)+len(b))
    mergepgids(merged, a, b)
    return merged
}

// mergepgids copies the sorted union of a and b into dst.
// If dst is too small, it panics.
func mergepgids(dst, a, b pgids) {
    if len(dst) < len(a)+len(b) {
        panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
    }
    // Copy in the opposite slice if one is nil.
    if len(a) == 0 {
        copy(dst, b)
        return
    }
    if len(b) == 0 {
        copy(dst, a)
        return
    }

    // Merged will hold all elements from both lists.
    merged := dst[:0]

    // Assign lead to the slice with a lower starting value, follow to the higher value.
    lead, follow := a, b
    if b[0] < a[0] {
        lead, follow = b, a
    }

    // Continue while there are elements in the lead.
    for len(lead) > 0 {
        // Merge largest prefix of lead that is ahead of follow[0].
        n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
        merged = append(merged, lead[:n]...)
        if n >= len(lead) {
            break
        }

        // Swap lead and follow.
        lead, follow = follow, lead[n:]
    }

    // Append what's left in follow.
    _ = append(merged, follow...)
}
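The `merge`/`mergepgids` helpers above are unexported, so they cannot be called from outside the bolt package. As a rough standalone sketch of the same sorted-union idea (the `sort.Search` skip-ahead over the leading run, here over plain `uint64` page ids), not bolt's actual code:

```go
package main

import (
	"fmt"
	"sort"
)

// mergeSorted returns the sorted union of two already-sorted id slices,
// mirroring the lead/follow swapping that bolt's mergepgids performs.
func mergeSorted(a, b []uint64) []uint64 {
	merged := make([]uint64, 0, len(a)+len(b))
	lead, follow := a, b
	if len(b) > 0 && (len(a) == 0 || b[0] < a[0]) {
		lead, follow = b, a
	}
	for len(lead) > 0 {
		// Take the longest prefix of lead that does not pass follow[0].
		n := len(lead)
		if len(follow) > 0 {
			n = sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
		}
		merged = append(merged, lead[:n]...)
		if n >= len(lead) {
			break
		}
		lead, follow = follow, lead[n:]
	}
	return append(merged, follow...)
}

func main() {
	fmt.Println(mergeSorted([]uint64{3, 5, 9}, []uint64{4, 6, 7, 10})) // [3 4 5 6 7 9 10]
}
```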
vendor/github.com/boltdb/bolt/tx.go (686 lines, generated, vendored, new file)
@@ -0,0 +1,686 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// txid represents the internal transaction identifier.
|
||||
type txid uint64
|
||||
|
||||
// Tx represents a read-only or read/write transaction on the database.
|
||||
// Read-only transactions can be used for retrieving values for keys and creating cursors.
|
||||
// Read/write transactions can create and remove buckets and create and remove keys.
|
||||
//
|
||||
// IMPORTANT: You must commit or rollback transactions when you are done with
|
||||
// them. Pages can not be reclaimed by the writer until no more transactions
|
||||
// are using them. A long running read transaction can cause the database to
|
||||
// quickly grow.
|
||||
type Tx struct {
|
||||
writable bool
|
||||
managed bool
|
||||
db *DB
|
||||
meta *meta
|
||||
root Bucket
|
||||
pages map[pgid]*page
|
||||
stats TxStats
|
||||
commitHandlers []func()
|
||||
|
||||
// WriteFlag specifies the flag for write-related methods like WriteTo().
|
||||
// Tx opens the database file with the specified flag to copy the data.
|
||||
//
|
||||
// By default, the flag is unset, which works well for mostly in-memory
|
||||
// workloads. For databases that are much larger than available RAM,
|
||||
// set the flag to syscall.O_DIRECT to avoid trashing the page cache.
|
||||
WriteFlag int
|
||||
}
|
||||
|
||||
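For context, the usual way to satisfy the commit-or-rollback requirement documented above is to let bolt manage the transaction through `DB.Update`/`DB.View`. A minimal sketch, assuming a throwaway `my.db` file and bucket/key names chosen only for illustration:

```go
package main

import (
	"fmt"
	"log"

	bolt "github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Update runs fn in a writable Tx and commits it, or rolls back on error.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("answer"), []byte("42"))
	}); err != nil {
		log.Fatal(err)
	}

	// View runs fn in a read-only Tx; keep such transactions short so the
	// writer can reclaim pages (see the warning in the Tx documentation above).
	if err := db.View(func(tx *bolt.Tx) error {
		fmt.Printf("answer=%s\n", tx.Bucket([]byte("widgets")).Get([]byte("answer")))
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}
```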
// init initializes the transaction.
|
||||
func (tx *Tx) init(db *DB) {
|
||||
tx.db = db
|
||||
tx.pages = nil
|
||||
|
||||
// Copy the meta page since it can be changed by the writer.
|
||||
tx.meta = &meta{}
|
||||
db.meta().copy(tx.meta)
|
||||
|
||||
// Copy over the root bucket.
|
||||
tx.root = newBucket(tx)
|
||||
tx.root.bucket = &bucket{}
|
||||
*tx.root.bucket = tx.meta.root
|
||||
|
||||
// Increment the transaction id and add a page cache for writable transactions.
|
||||
if tx.writable {
|
||||
tx.pages = make(map[pgid]*page)
|
||||
tx.meta.txid += txid(1)
|
||||
}
|
||||
}
|
||||
|
||||
// ID returns the transaction id.
|
||||
func (tx *Tx) ID() int {
|
||||
return int(tx.meta.txid)
|
||||
}
|
||||
|
||||
// DB returns a reference to the database that created the transaction.
|
||||
func (tx *Tx) DB() *DB {
|
||||
return tx.db
|
||||
}
|
||||
|
||||
// Size returns current database size in bytes as seen by this transaction.
|
||||
func (tx *Tx) Size() int64 {
|
||||
return int64(tx.meta.pgid) * int64(tx.db.pageSize)
|
||||
}
|
||||
|
||||
// Writable returns whether the transaction can perform write operations.
|
||||
func (tx *Tx) Writable() bool {
|
||||
return tx.writable
|
||||
}
|
||||
|
||||
// Cursor creates a cursor associated with the root bucket.
|
||||
// All items in the cursor will return a nil value because all root bucket keys point to buckets.
|
||||
// The cursor is only valid as long as the transaction is open.
|
||||
// Do not use a cursor after the transaction is closed.
|
||||
func (tx *Tx) Cursor() *Cursor {
|
||||
return tx.root.Cursor()
|
||||
}
|
||||
|
||||
// Stats retrieves a copy of the current transaction statistics.
|
||||
func (tx *Tx) Stats() TxStats {
|
||||
return tx.stats
|
||||
}
|
||||
|
||||
// Bucket retrieves a bucket by name.
|
||||
// Returns nil if the bucket does not exist.
|
||||
// The bucket instance is only valid for the lifetime of the transaction.
|
||||
func (tx *Tx) Bucket(name []byte) *Bucket {
|
||||
return tx.root.Bucket(name)
|
||||
}
|
||||
|
||||
// CreateBucket creates a new bucket.
|
||||
// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
|
||||
// The bucket instance is only valid for the lifetime of the transaction.
|
||||
func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
|
||||
return tx.root.CreateBucket(name)
|
||||
}
|
||||
|
||||
// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
|
||||
// Returns an error if the bucket name is blank, or if the bucket name is too long.
|
||||
// The bucket instance is only valid for the lifetime of the transaction.
|
||||
func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
|
||||
return tx.root.CreateBucketIfNotExists(name)
|
||||
}
|
||||
|
||||
// DeleteBucket deletes a bucket.
|
||||
// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
|
||||
func (tx *Tx) DeleteBucket(name []byte) error {
|
||||
return tx.root.DeleteBucket(name)
|
||||
}
|
||||
|
||||
// ForEach executes a function for each bucket in the root.
|
||||
// If the provided function returns an error then the iteration is stopped and
|
||||
// the error is returned to the caller.
|
||||
func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
|
||||
return tx.root.ForEach(func(k, v []byte) error {
|
||||
if err := fn(k, tx.root.Bucket(k)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
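A small sketch of `Tx.ForEach`, listing the top-level buckets of an already-opened `*bolt.DB`; the `listBuckets` helper name is ours, and the fragment assumes the imports from the sketch above:

```go
// listBuckets prints every top-level bucket name and its key count.
func listBuckets(db *bolt.DB) error {
	return db.View(func(tx *bolt.Tx) error {
		return tx.ForEach(func(name []byte, b *bolt.Bucket) error {
			fmt.Printf("bucket %q has %d keys\n", name, b.Stats().KeyN)
			return nil
		})
	})
}
```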
// OnCommit adds a handler function to be executed after the transaction successfully commits.
|
||||
func (tx *Tx) OnCommit(fn func()) {
|
||||
tx.commitHandlers = append(tx.commitHandlers, fn)
|
||||
}
|
||||
|
||||
// Commit writes all changes to disk and updates the meta page.
|
||||
// Returns an error if a disk write error occurs, or if Commit is
|
||||
// called on a read-only transaction.
|
||||
func (tx *Tx) Commit() error {
|
||||
_assert(!tx.managed, "managed tx commit not allowed")
|
||||
if tx.db == nil {
|
||||
return ErrTxClosed
|
||||
} else if !tx.writable {
|
||||
return ErrTxNotWritable
|
||||
}
|
||||
|
||||
// TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
|
||||
|
||||
// Rebalance nodes which have had deletions.
|
||||
var startTime = time.Now()
|
||||
tx.root.rebalance()
|
||||
if tx.stats.Rebalance > 0 {
|
||||
tx.stats.RebalanceTime += time.Since(startTime)
|
||||
}
|
||||
|
||||
// spill data onto dirty pages.
|
||||
startTime = time.Now()
|
||||
if err := tx.root.spill(); err != nil {
|
||||
tx.rollback()
|
||||
return err
|
||||
}
|
||||
tx.stats.SpillTime += time.Since(startTime)
|
||||
|
||||
// Free the old root bucket.
|
||||
tx.meta.root.root = tx.root.root
|
||||
|
||||
opgid := tx.meta.pgid
|
||||
|
||||
// Free the freelist and allocate new pages for it. This will overestimate
|
||||
// the size of the freelist but not underestimate the size (which would be bad).
|
||||
tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
|
||||
p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
|
||||
if err != nil {
|
||||
tx.rollback()
|
||||
return err
|
||||
}
|
||||
if err := tx.db.freelist.write(p); err != nil {
|
||||
tx.rollback()
|
||||
return err
|
||||
}
|
||||
tx.meta.freelist = p.id
|
||||
|
||||
// If the high water mark has moved up then attempt to grow the database.
|
||||
if tx.meta.pgid > opgid {
|
||||
if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
|
||||
tx.rollback()
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Write dirty pages to disk.
|
||||
startTime = time.Now()
|
||||
if err := tx.write(); err != nil {
|
||||
tx.rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
// If strict mode is enabled then perform a consistency check.
|
||||
// Only the first consistency error is reported in the panic.
|
||||
if tx.db.StrictMode {
|
||||
ch := tx.Check()
|
||||
var errs []string
|
||||
for {
|
||||
err, ok := <-ch
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
errs = append(errs, err.Error())
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
panic("check fail: " + strings.Join(errs, "\n"))
|
||||
}
|
||||
}
|
||||
|
||||
// Write meta to disk.
|
||||
if err := tx.writeMeta(); err != nil {
|
||||
tx.rollback()
|
||||
return err
|
||||
}
|
||||
tx.stats.WriteTime += time.Since(startTime)
|
||||
|
||||
// Finalize the transaction.
|
||||
tx.close()
|
||||
|
||||
// Execute commit handlers now that the locks have been removed.
|
||||
for _, fn := range tx.commitHandlers {
|
||||
fn()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
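When more control is needed than `DB.Update` offers, the unmanaged path is `DB.Begin` plus an explicit `Commit`, with a deferred `Rollback` as the safety net (after a successful commit it simply returns `ErrTxClosed`). A sketch under the same assumptions as the earlier example, with `putWidget` being an illustrative helper name:

```go
// putWidget writes one key using an explicitly managed read-write transaction.
func putWidget(db *bolt.DB, key, value []byte) error {
	tx, err := db.Begin(true) // true = writable
	if err != nil {
		return err
	}
	// If Commit succeeds below, this Rollback is effectively a no-op.
	defer tx.Rollback()

	// OnCommit handlers run only after the transaction commits successfully.
	tx.OnCommit(func() { log.Printf("committed %q", key) })

	b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
	if err != nil {
		return err
	}
	if err := b.Put(key, value); err != nil {
		return err
	}
	return tx.Commit()
}
```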
// Rollback closes the transaction and ignores all previous updates. Read-only
|
||||
// transactions must be rolled back and not committed.
|
||||
func (tx *Tx) Rollback() error {
|
||||
_assert(!tx.managed, "managed tx rollback not allowed")
|
||||
if tx.db == nil {
|
||||
return ErrTxClosed
|
||||
}
|
||||
tx.rollback()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tx *Tx) rollback() {
|
||||
if tx.db == nil {
|
||||
return
|
||||
}
|
||||
if tx.writable {
|
||||
tx.db.freelist.rollback(tx.meta.txid)
|
||||
tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
|
||||
}
|
||||
tx.close()
|
||||
}
|
||||
|
||||
func (tx *Tx) close() {
|
||||
if tx.db == nil {
|
||||
return
|
||||
}
|
||||
if tx.writable {
|
||||
// Grab freelist stats.
|
||||
var freelistFreeN = tx.db.freelist.free_count()
|
||||
var freelistPendingN = tx.db.freelist.pending_count()
|
||||
var freelistAlloc = tx.db.freelist.size()
|
||||
|
||||
// Remove transaction ref & writer lock.
|
||||
tx.db.rwtx = nil
|
||||
tx.db.rwlock.Unlock()
|
||||
|
||||
// Merge statistics.
|
||||
tx.db.statlock.Lock()
|
||||
tx.db.stats.FreePageN = freelistFreeN
|
||||
tx.db.stats.PendingPageN = freelistPendingN
|
||||
tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
|
||||
tx.db.stats.FreelistInuse = freelistAlloc
|
||||
tx.db.stats.TxStats.add(&tx.stats)
|
||||
tx.db.statlock.Unlock()
|
||||
} else {
|
||||
tx.db.removeTx(tx)
|
||||
}
|
||||
|
||||
// Clear all references.
|
||||
tx.db = nil
|
||||
tx.meta = nil
|
||||
tx.root = Bucket{tx: tx}
|
||||
tx.pages = nil
|
||||
}
|
||||
|
||||
// Copy writes the entire database to a writer.
|
||||
// This function exists for backwards compatibility.
|
||||
//
|
||||
// Deprecated; Use WriteTo() instead.
|
||||
func (tx *Tx) Copy(w io.Writer) error {
|
||||
_, err := tx.WriteTo(w)
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteTo writes the entire database to a writer.
|
||||
// If err == nil then exactly tx.Size() bytes will be written into the writer.
|
||||
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
|
||||
// Attempt to open reader with WriteFlag
|
||||
f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() { _ = f.Close() }()
|
||||
|
||||
// Generate a meta page. We use the same page data for both meta pages.
|
||||
buf := make([]byte, tx.db.pageSize)
|
||||
page := (*page)(unsafe.Pointer(&buf[0]))
|
||||
page.flags = metaPageFlag
|
||||
*page.meta() = *tx.meta
|
||||
|
||||
// Write meta 0.
|
||||
page.id = 0
|
||||
page.meta().checksum = page.meta().sum64()
|
||||
nn, err := w.Write(buf)
|
||||
n += int64(nn)
|
||||
if err != nil {
|
||||
return n, fmt.Errorf("meta 0 copy: %s", err)
|
||||
}
|
||||
|
||||
// Write meta 1 with a lower transaction id.
|
||||
page.id = 1
|
||||
page.meta().txid -= 1
|
||||
page.meta().checksum = page.meta().sum64()
|
||||
nn, err = w.Write(buf)
|
||||
n += int64(nn)
|
||||
if err != nil {
|
||||
return n, fmt.Errorf("meta 1 copy: %s", err)
|
||||
}
|
||||
|
||||
// Move past the meta pages in the file.
|
||||
if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil {
|
||||
return n, fmt.Errorf("seek: %s", err)
|
||||
}
|
||||
|
||||
// Copy data pages.
|
||||
wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
|
||||
n += wn
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
return n, f.Close()
|
||||
}
|
||||
|
||||
// CopyFile copies the entire database to file at the given path.
|
||||
// A reader transaction is maintained during the copy so it is safe to continue
|
||||
// using the database while a copy is in progress.
|
||||
func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
|
||||
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = tx.Copy(f)
|
||||
if err != nil {
|
||||
_ = f.Close()
|
||||
return err
|
||||
}
|
||||
return f.Close()
|
||||
}
|
||||
|
||||
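`CopyFile` (and the underlying `WriteTo`) is the usual online-backup hook; the read transaction pins a consistent view of the database for the duration of the copy. A minimal sketch, with the helper name and destination path purely illustrative:

```go
// backupTo snapshots the database to path while readers and writers keep running.
func backupTo(db *bolt.DB, path string) error {
	return db.View(func(tx *bolt.Tx) error {
		return tx.CopyFile(path, 0600)
	})
}
```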
// Check performs several consistency checks on the database for this transaction.
|
||||
// An error is returned if any inconsistency is found.
|
||||
//
|
||||
// It can be safely run concurrently on a writable transaction. However, this
|
||||
// incurs a high cost for large databases and databases with a lot of subbuckets
|
||||
// because of caching. This overhead can be removed if running on a read-only
|
||||
// transaction, however, it is not safe to execute other writer transactions at
|
||||
// the same time.
|
||||
func (tx *Tx) Check() <-chan error {
|
||||
ch := make(chan error)
|
||||
go tx.check(ch)
|
||||
return ch
|
||||
}
|
||||
|
||||
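`Check` streams any consistency errors over a channel that is closed once the scan finishes, so callers should drain it to completion before returning. A hedged sketch (helper name ours, imports as in the earlier examples):

```go
// checkDB runs bolt's consistency checks and returns the first problem found.
func checkDB(db *bolt.DB) error {
	return db.View(func(tx *bolt.Tx) error {
		var first error
		// Drain the channel completely so the checking goroutine can finish.
		for err := range tx.Check() {
			if first == nil {
				first = err
			}
		}
		return first
	})
}
```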
func (tx *Tx) check(ch chan error) {
|
||||
// Check if any pages are double freed.
|
||||
freed := make(map[pgid]bool)
|
||||
all := make([]pgid, tx.db.freelist.count())
|
||||
tx.db.freelist.copyall(all)
|
||||
for _, id := range all {
|
||||
if freed[id] {
|
||||
ch <- fmt.Errorf("page %d: already freed", id)
|
||||
}
|
||||
freed[id] = true
|
||||
}
|
||||
|
||||
// Track every reachable page.
|
||||
reachable := make(map[pgid]*page)
|
||||
reachable[0] = tx.page(0) // meta0
|
||||
reachable[1] = tx.page(1) // meta1
|
||||
for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
|
||||
reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
|
||||
}
|
||||
|
||||
// Recursively check buckets.
|
||||
tx.checkBucket(&tx.root, reachable, freed, ch)
|
||||
|
||||
// Ensure all pages below high water mark are either reachable or freed.
|
||||
for i := pgid(0); i < tx.meta.pgid; i++ {
|
||||
_, isReachable := reachable[i]
|
||||
if !isReachable && !freed[i] {
|
||||
ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
|
||||
}
|
||||
}
|
||||
|
||||
// Close the channel to signal completion.
|
||||
close(ch)
|
||||
}
|
||||
|
||||
func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
|
||||
// Ignore inline buckets.
|
||||
if b.root == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// Check every page used by this bucket.
|
||||
b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
|
||||
if p.id > tx.meta.pgid {
|
||||
ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
|
||||
}
|
||||
|
||||
// Ensure each page is only referenced once.
|
||||
for i := pgid(0); i <= pgid(p.overflow); i++ {
|
||||
var id = p.id + i
|
||||
if _, ok := reachable[id]; ok {
|
||||
ch <- fmt.Errorf("page %d: multiple references", int(id))
|
||||
}
|
||||
reachable[id] = p
|
||||
}
|
||||
|
||||
// We should only encounter un-freed leaf and branch pages.
|
||||
if freed[p.id] {
|
||||
ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
|
||||
} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
|
||||
ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
|
||||
}
|
||||
})
|
||||
|
||||
// Check each bucket within this bucket.
|
||||
_ = b.ForEach(func(k, v []byte) error {
|
||||
if child := b.Bucket(k); child != nil {
|
||||
tx.checkBucket(child, reachable, freed, ch)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// allocate returns a contiguous block of memory starting at a given page.
|
||||
func (tx *Tx) allocate(count int) (*page, error) {
|
||||
p, err := tx.db.allocate(count)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Save to our page cache.
|
||||
tx.pages[p.id] = p
|
||||
|
||||
// Update statistics.
|
||||
tx.stats.PageCount++
|
||||
tx.stats.PageAlloc += count * tx.db.pageSize
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// write writes any dirty pages to disk.
|
||||
func (tx *Tx) write() error {
|
||||
// Sort pages by id.
|
||||
pages := make(pages, 0, len(tx.pages))
|
||||
for _, p := range tx.pages {
|
||||
pages = append(pages, p)
|
||||
}
|
||||
// Clear out page cache early.
|
||||
tx.pages = make(map[pgid]*page)
|
||||
sort.Sort(pages)
|
||||
|
||||
// Write pages to disk in order.
|
||||
for _, p := range pages {
|
||||
size := (int(p.overflow) + 1) * tx.db.pageSize
|
||||
offset := int64(p.id) * int64(tx.db.pageSize)
|
||||
|
||||
// Write out page in "max allocation" sized chunks.
|
||||
ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
|
||||
for {
|
||||
// Limit our write to our max allocation size.
|
||||
sz := size
|
||||
if sz > maxAllocSize-1 {
|
||||
sz = maxAllocSize - 1
|
||||
}
|
||||
|
||||
// Write chunk to disk.
|
||||
buf := ptr[:sz]
|
||||
if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Update statistics.
|
||||
tx.stats.Write++
|
||||
|
||||
// Exit inner for loop if we've written all the chunks.
|
||||
size -= sz
|
||||
if size == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Otherwise move offset forward and move pointer to next chunk.
|
||||
offset += int64(sz)
|
||||
ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
|
||||
}
|
||||
}
|
||||
|
||||
// Ignore file sync if flag is set on DB.
|
||||
if !tx.db.NoSync || IgnoreNoSync {
|
||||
if err := fdatasync(tx.db); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Put small pages back to page pool.
|
||||
for _, p := range pages {
|
||||
// Ignore page sizes over 1 page.
|
||||
// These are allocated using make() instead of the page pool.
|
||||
if int(p.overflow) != 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
|
||||
|
||||
// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
|
||||
for i := range buf {
|
||||
buf[i] = 0
|
||||
}
|
||||
tx.db.pagePool.Put(buf)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeMeta writes the meta to the disk.
|
||||
func (tx *Tx) writeMeta() error {
|
||||
// Create a temporary buffer for the meta page.
|
||||
buf := make([]byte, tx.db.pageSize)
|
||||
p := tx.db.pageInBuffer(buf, 0)
|
||||
tx.meta.write(p)
|
||||
|
||||
// Write the meta page to file.
|
||||
if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
|
||||
return err
|
||||
}
|
||||
if !tx.db.NoSync || IgnoreNoSync {
|
||||
if err := fdatasync(tx.db); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Update statistics.
|
||||
tx.stats.Write++
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// page returns a reference to the page with a given id.
|
||||
// If page has been written to then a temporary buffered page is returned.
|
||||
func (tx *Tx) page(id pgid) *page {
|
||||
// Check the dirty pages first.
|
||||
if tx.pages != nil {
|
||||
if p, ok := tx.pages[id]; ok {
|
||||
return p
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise return directly from the mmap.
|
||||
return tx.db.page(id)
|
||||
}
|
||||
|
||||
// forEachPage iterates over every page within a given page and executes a function.
|
||||
func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
|
||||
p := tx.page(pgid)
|
||||
|
||||
// Execute function.
|
||||
fn(p, depth)
|
||||
|
||||
// Recursively loop over children.
|
||||
if (p.flags & branchPageFlag) != 0 {
|
||||
for i := 0; i < int(p.count); i++ {
|
||||
elem := p.branchPageElement(uint16(i))
|
||||
tx.forEachPage(elem.pgid, depth+1, fn)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Page returns page information for a given page number.
|
||||
// This is only safe for concurrent use when used by a writable transaction.
|
||||
func (tx *Tx) Page(id int) (*PageInfo, error) {
|
||||
if tx.db == nil {
|
||||
return nil, ErrTxClosed
|
||||
} else if pgid(id) >= tx.meta.pgid {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Build the page info.
|
||||
p := tx.db.page(pgid(id))
|
||||
info := &PageInfo{
|
||||
ID: id,
|
||||
Count: int(p.count),
|
||||
OverflowCount: int(p.overflow),
|
||||
}
|
||||
|
||||
// Determine the type (or if it's free).
|
||||
if tx.db.freelist.freed(pgid(id)) {
|
||||
info.Type = "free"
|
||||
} else {
|
||||
info.Type = p.typ()
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// TxStats represents statistics about the actions performed by the transaction.
|
||||
type TxStats struct {
|
||||
// Page statistics.
|
||||
PageCount int // number of page allocations
|
||||
PageAlloc int // total bytes allocated
|
||||
|
||||
// Cursor statistics.
|
||||
CursorCount int // number of cursors created
|
||||
|
||||
// Node statistics
|
||||
NodeCount int // number of node allocations
|
||||
NodeDeref int // number of node dereferences
|
||||
|
||||
// Rebalance statistics.
|
||||
Rebalance int // number of node rebalances
|
||||
RebalanceTime time.Duration // total time spent rebalancing
|
||||
|
||||
// Split/Spill statistics.
|
||||
Split int // number of nodes split
|
||||
Spill int // number of nodes spilled
|
||||
SpillTime time.Duration // total time spent spilling
|
||||
|
||||
// Write statistics.
|
||||
Write int // number of writes performed
|
||||
WriteTime time.Duration // total time spent writing to disk
|
||||
}
|
||||
|
||||
func (s *TxStats) add(other *TxStats) {
|
||||
s.PageCount += other.PageCount
|
||||
s.PageAlloc += other.PageAlloc
|
||||
s.CursorCount += other.CursorCount
|
||||
s.NodeCount += other.NodeCount
|
||||
s.NodeDeref += other.NodeDeref
|
||||
s.Rebalance += other.Rebalance
|
||||
s.RebalanceTime += other.RebalanceTime
|
||||
s.Split += other.Split
|
||||
s.Spill += other.Spill
|
||||
s.SpillTime += other.SpillTime
|
||||
s.Write += other.Write
|
||||
s.WriteTime += other.WriteTime
|
||||
}
|
||||
|
||||
// Sub calculates and returns the difference between two sets of transaction stats.
|
||||
// This is useful when obtaining stats at two different points in time and
|
||||
// you need the performance counters that occurred within that time span.
|
||||
func (s *TxStats) Sub(other *TxStats) TxStats {
|
||||
var diff TxStats
|
||||
diff.PageCount = s.PageCount - other.PageCount
|
||||
diff.PageAlloc = s.PageAlloc - other.PageAlloc
|
||||
diff.CursorCount = s.CursorCount - other.CursorCount
|
||||
diff.NodeCount = s.NodeCount - other.NodeCount
|
||||
diff.NodeDeref = s.NodeDeref - other.NodeDeref
|
||||
diff.Rebalance = s.Rebalance - other.Rebalance
|
||||
diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
|
||||
diff.Split = s.Split - other.Split
|
||||
diff.Spill = s.Spill - other.Spill
|
||||
diff.SpillTime = s.SpillTime - other.SpillTime
|
||||
diff.Write = s.Write - other.Write
|
||||
diff.WriteTime = s.WriteTime - other.WriteTime
|
||||
return diff
|
||||
}
|
||||
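`TxStats.Sub` is what makes periodic monitoring cheap: take `DB.Stats()` at two points in time and diff the embedded `TxStats`. A sketch assuming a fixed sampling interval and the imports from the earlier examples (plus `time`):

```go
// reportTxStats logs the transaction counters accumulated during each interval.
func reportTxStats(db *bolt.DB, interval time.Duration) {
	prev := db.Stats()
	for range time.Tick(interval) {
		cur := db.Stats()
		diff := cur.TxStats.Sub(&prev.TxStats)
		log.Printf("writes=%d (%v), spills=%d, rebalances=%d",
			diff.Write, diff.WriteTime, diff.Spill, diff.Rebalance)
		prev = cur
	}
}
```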
vendor/github.com/containers/image/README.md (10 lines changed, generated, vendored)
@@ -25,7 +25,7 @@ them as necessary, and to sign and verify images.
The containers/image project is only a library with no user interface;
you can either incorporate it into your Go programs, or use the `skopeo` tool:

The [skopeo](https://github.com/projectatomic/skopeo) tool uses the
The [skopeo](https://github.com/containers/skopeo) tool uses the
containers/image library and takes advantage of many of its features,
e.g. `skopeo copy` exposes the `containers/image/copy.Image` functionality.
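To make the `containers/image/copy.Image` reference concrete, here is a hedged sketch of calling it directly, matching the signature this diff introduces (manifest bytes plus error). The transport strings and the accept-anything policy are illustrative only, not a recommendation:

```go
package main

import (
	"context"
	"log"

	"github.com/containers/image/copy"
	"github.com/containers/image/signature"
	"github.com/containers/image/transports/alltransports"
)

func main() {
	srcRef, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
	if err != nil {
		log.Fatal(err)
	}
	destRef, err := alltransports.ParseImageName("dir:/tmp/busybox")
	if err != nil {
		log.Fatal(err)
	}

	// An accept-anything policy keeps the example short; real callers should
	// load the system policy (e.g. via signature.DefaultPolicy) instead.
	policy := &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
	policyContext, err := signature.NewPolicyContext(policy)
	if err != nil {
		log.Fatal(err)
	}
	defer policyContext.Destroy()

	manifest, err := copy.Image(context.Background(), policyContext, destRef, srcRef, &copy.Options{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("copied image, manifest is %d bytes", len(manifest))
}
```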
@@ -42,7 +42,7 @@ What this project tests against dependencies-wise is located

## Building

If you want to see what the library can do, or an example of how it is called,
consider starting with the [skopeo](https://github.com/projectatomic/skopeo) tool
consider starting with the [skopeo](https://github.com/containers/skopeo) tool
instead.

To integrate this library into your project, put it into `$GOPATH` or use
@@ -53,7 +53,7 @@ are also available

This library, by default, also depends on the GpgME and libostree C libraries. Either install them:
```sh
Fedora$ dnf install gpgme-devel libassuan-devel libostree-devel
Fedora$ dnf install gpgme-devel libassuan-devel ostree-devel
macOS$ brew install gpgme
```
or use the build tags described below to avoid the dependencies (e.g. using `go build -tags …`)
@@ -73,7 +73,9 @@ When developing this library, please use `make` (or `make … BUILDTAGS=…`) to

## License

ASL 2.0
Apache License 2.0

SPDX-License-Identifier: Apache-2.0

## Contact

vendor/github.com/containers/image/copy/copy.go (312 lines changed, generated, vendored)
@@ -2,7 +2,6 @@ package copy
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -10,28 +9,38 @@ import (
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/image"
|
||||
"github.com/containers/image/pkg/blobinfocache"
|
||||
"github.com/containers/image/pkg/compression"
|
||||
"github.com/containers/image/signature"
|
||||
"github.com/containers/image/transports"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/klauspost/pgzip"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/sync/semaphore"
|
||||
pb "gopkg.in/cheggaaa/pb.v1"
|
||||
)
|
||||
|
||||
type digestingReader struct {
|
||||
source io.Reader
|
||||
digester digest.Digester
|
||||
expectedDigest digest.Digest
|
||||
validationFailed bool
|
||||
source io.Reader
|
||||
digester digest.Digester
|
||||
expectedDigest digest.Digest
|
||||
validationFailed bool
|
||||
validationSucceeded bool
|
||||
}
|
||||
|
||||
// maxParallelDownloads is used to limit the maximum number of parallel
|
||||
// downloads. Let's follow Firefox by limiting it to 6.
|
||||
var maxParallelDownloads = 6
|
||||
|
||||
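The `maxParallelDownloads` limit is enforced further down with a `golang.org/x/sync/semaphore.Weighted`. The bounded-worker pattern looks roughly like this standalone sketch, where the per-item work is just a stand-in for the real layer copy:

```go
package main

import (
	"context"
	"fmt"
	"sync"

	"golang.org/x/sync/semaphore"
)

func main() {
	const maxParallel = 6 // mirror the Firefox-inspired limit above
	sem := semaphore.NewWeighted(maxParallel)
	var wg sync.WaitGroup

	for i := 0; i < 20; i++ {
		if err := sem.Acquire(context.Background(), 1); err != nil {
			break // Acquire only fails if the context is cancelled
		}
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			defer sem.Release(1)
			fmt.Println("downloading layer", n) // stand-in for the real copy work
		}(i)
	}
	wg.Wait()
}
```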
// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
|
||||
// and set validationFailed to true if the source stream does not match expectedDigest.
|
||||
// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
|
||||
// (neither is set if EOF is never reached).
|
||||
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
|
||||
if err := expectedDigest.Validate(); err != nil {
|
||||
return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
|
||||
@@ -64,6 +73,7 @@ func (d *digestingReader) Read(p []byte) (int, error) {
|
||||
d.validationFailed = true
|
||||
return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
|
||||
}
|
||||
d.validationSucceeded = true
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
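The `digestingReader` above hashes the stream as it is consumed and fails on mismatch. The same check can be sketched with the `opencontainers/go-digest` primitives it builds on; here the expected digest is simply the digest of the sample data, so the check passes:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	digest "github.com/opencontainers/go-digest"
)

// verifyStream copies r to discard while hashing it, then compares the result
// with expected, which is conceptually what digestingReader does incrementally.
func verifyStream(r io.Reader, expected digest.Digest) error {
	digester := expected.Algorithm().Digester()
	if _, err := io.Copy(ioutil.Discard, io.TeeReader(r, digester.Hash())); err != nil {
		return err
	}
	if actual := digester.Digest(); actual != expected {
		return fmt.Errorf("digest mismatch: expected %s, got %s", expected, actual)
	}
	return nil
}

func main() {
	data := []byte("layer contents")
	expected := digest.FromBytes(data)
	fmt.Println(verifyStream(bytes.NewReader(data), expected)) // <nil>
}
```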
@@ -71,22 +81,23 @@ func (d *digestingReader) Read(p []byte) (int, error) {
|
||||
// copier allows us to keep track of diffID values for blobs, and other
|
||||
// data shared across one or more images in a possible manifest list.
|
||||
type copier struct {
|
||||
copiedBlobs map[digest.Digest]digest.Digest
|
||||
cachedDiffIDs map[digest.Digest]digest.Digest
|
||||
dest types.ImageDestination
|
||||
rawSource types.ImageSource
|
||||
reportWriter io.Writer
|
||||
progressInterval time.Duration
|
||||
progress chan types.ProgressProperties
|
||||
blobInfoCache types.BlobInfoCache
|
||||
copyInParallel bool
|
||||
}
|
||||
|
||||
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
|
||||
type imageCopier struct {
|
||||
c *copier
|
||||
manifestUpdates *types.ManifestUpdateOptions
|
||||
src types.Image
|
||||
diffIDsAreNeeded bool
|
||||
canModifyManifest bool
|
||||
c *copier
|
||||
manifestUpdates *types.ManifestUpdateOptions
|
||||
src types.Image
|
||||
diffIDsAreNeeded bool
|
||||
canModifyManifest bool
|
||||
canSubstituteBlobs bool
|
||||
}
|
||||
|
||||
// Options allows supplying non-default configuration modifying the behavior of CopyImage.
|
||||
@@ -103,8 +114,9 @@ type Options struct {
|
||||
}
|
||||
|
||||
// Image copies image from srcRef to destRef, using policyContext to validate
|
||||
// source image admissibility.
|
||||
func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (retErr error) {
|
||||
// source image admissibility. It returns the manifest which was written to
|
||||
// the new copy of the image.
|
||||
func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (manifest []byte, retErr error) {
|
||||
// NOTE this function uses an output parameter for the error return value.
|
||||
// Setting this and returning is the ideal way to return an error.
|
||||
//
|
||||
@@ -122,7 +134,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
|
||||
|
||||
dest, err := destRef.NewImageDestination(ctx, options.DestinationCtx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
|
||||
return nil, errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
|
||||
}
|
||||
defer func() {
|
||||
if err := dest.Close(); err != nil {
|
||||
@@ -132,7 +144,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
|
||||
|
||||
rawSource, err := srcRef.NewImageSource(ctx, options.SourceCtx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
|
||||
return nil, errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
|
||||
}
|
||||
defer func() {
|
||||
if err := rawSource.Close(); err != nil {
|
||||
@@ -140,76 +152,80 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
|
||||
}
|
||||
}()
|
||||
|
||||
copyInParallel := dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob()
|
||||
c := &copier{
|
||||
copiedBlobs: make(map[digest.Digest]digest.Digest),
|
||||
cachedDiffIDs: make(map[digest.Digest]digest.Digest),
|
||||
dest: dest,
|
||||
rawSource: rawSource,
|
||||
reportWriter: reportWriter,
|
||||
progressInterval: options.ProgressInterval,
|
||||
progress: options.Progress,
|
||||
copyInParallel: copyInParallel,
|
||||
// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
|
||||
// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
|
||||
// we might want to add a separate CommonCtx — or would that be too confusing?
|
||||
blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx),
|
||||
}
|
||||
|
||||
unparsedToplevel := image.UnparsedInstance(rawSource, nil)
|
||||
multiImage, err := isMultiImage(ctx, unparsedToplevel)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef))
|
||||
return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef))
|
||||
}
|
||||
|
||||
if !multiImage {
|
||||
// The simple case: Just copy a single image.
|
||||
if err := c.copyOneImage(ctx, policyContext, options, unparsedToplevel); err != nil {
|
||||
return err
|
||||
if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedToplevel); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
// This is a manifest list. Choose a single image and copy it.
|
||||
// FIXME: Copy to destinations which support manifest lists, one image at a time.
|
||||
instanceDigest, err := image.ChooseManifestInstanceFromManifestList(ctx, options.SourceCtx, unparsedToplevel)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef))
|
||||
return nil, errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef))
|
||||
}
|
||||
logrus.Debugf("Source is a manifest list; copying (only) instance %s", instanceDigest)
|
||||
unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest)
|
||||
|
||||
if err := c.copyOneImage(ctx, policyContext, options, unparsedInstance); err != nil {
|
||||
return err
|
||||
if manifest, err = c.copyOneImage(ctx, policyContext, options, unparsedInstance); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := c.dest.Commit(ctx); err != nil {
|
||||
return errors.Wrap(err, "Error committing the finished image")
|
||||
return nil, errors.Wrap(err, "Error committing the finished image")
|
||||
}
|
||||
|
||||
return nil
|
||||
return manifest, nil
|
||||
}
|
||||
|
||||
// Image copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
|
||||
// source image admissibility.
|
||||
func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (retErr error) {
|
||||
func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (manifest []byte, retErr error) {
|
||||
// The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
|
||||
// Make sure we fail cleanly in such cases.
|
||||
multiImage, err := isMultiImage(ctx, unparsedImage)
|
||||
if err != nil {
|
||||
// FIXME FIXME: How to name a reference for the sub-image?
|
||||
return errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
|
||||
return nil, errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference()))
|
||||
}
|
||||
if multiImage {
|
||||
return fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
|
||||
return nil, fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
|
||||
}
|
||||
|
||||
// Please keep this policy check BEFORE reading any other information about the image.
|
||||
// (the multiImage check above only matches the MIME type, which we have received anyway.
|
||||
// Actual parsing of anything should be deferred.)
|
||||
if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
|
||||
return errors.Wrap(err, "Source image rejected")
|
||||
return nil, errors.Wrap(err, "Source image rejected")
|
||||
}
|
||||
src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
|
||||
return nil, errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference()))
|
||||
}
|
||||
|
||||
if err := checkImageDestinationForCurrentRuntimeOS(ctx, options.DestinationCtx, src, c.dest); err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var sigs [][]byte
|
||||
@@ -219,14 +235,14 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
|
||||
c.Printf("Getting image source signatures\n")
|
||||
s, err := src.Signatures(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error reading signatures")
|
||||
return nil, errors.Wrap(err, "Error reading signatures")
|
||||
}
|
||||
sigs = s
|
||||
}
|
||||
if len(sigs) != 0 {
|
||||
c.Printf("Checking if image destination supports signatures\n")
|
||||
if err := c.dest.SupportsSignatures(ctx); err != nil {
|
||||
return errors.Wrap(err, "Can not copy signatures")
|
||||
return nil, errors.Wrap(err, "Can not copy signatures")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -236,31 +252,38 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
|
||||
src: src,
|
||||
// diffIDsAreNeeded is computed later
|
||||
canModifyManifest: len(sigs) == 0,
|
||||
// Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
|
||||
// This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
|
||||
// The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
|
||||
// We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
|
||||
// that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
|
||||
// and we would reuse and sign it.
|
||||
canSubstituteBlobs: len(sigs) == 0 && options.SignBy == "",
|
||||
}
|
||||
|
||||
if err := ic.updateEmbeddedDockerReference(); err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// We compute preferredManifestMIMEType only to show it in error messages.
|
||||
// Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
|
||||
preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(ctx, c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
|
||||
ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
|
||||
|
||||
if err := ic.copyLayers(ctx); err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
|
||||
// and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
|
||||
// without actually trying to upload something and getting a types.ManifestTypeRejectedError.
|
||||
// So, try the preferred manifest MIME type. If the process succeeds, fine…
|
||||
manifest, err := ic.copyUpdatedConfigAndManifest(ctx)
|
||||
manifest, err = ic.copyUpdatedConfigAndManifest(ctx)
|
||||
if err != nil {
|
||||
logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
|
||||
// … if it fails, _and_ the failure is because the manifest is rejected, we may have other options.
|
||||
@@ -268,14 +291,14 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
|
||||
// We don’t have other options.
|
||||
// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
|
||||
// Don’t bother the user with MIME types if we have no choice.
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
// If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType.
|
||||
// So if we are here, we will definitely be trying to convert the manifest.
|
||||
// With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason,
|
||||
// so let’s bail out early and with a better error message.
|
||||
if !ic.canModifyManifest {
|
||||
return errors.Wrap(err, "Writing manifest failed (and converting it is not possible)")
|
||||
return nil, errors.Wrap(err, "Writing manifest failed (and converting it is not possible)")
|
||||
}
|
||||
|
||||
// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
|
||||
@@ -296,24 +319,24 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
|
||||
break
|
||||
}
|
||||
if errs != nil {
|
||||
return fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
|
||||
return nil, fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
if options.SignBy != "" {
|
||||
newSig, err := c.createSignature(manifest, options.SignBy)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
sigs = append(sigs, newSig)
|
||||
}
|
||||
|
||||
c.Printf("Storing signatures\n")
|
||||
if err := c.dest.PutSignatures(ctx, sigs); err != nil {
|
||||
return errors.Wrap(err, "Error writing signatures")
|
||||
return nil, errors.Wrap(err, "Error writing signatures")
|
||||
}
|
||||
|
||||
return nil
|
||||
return manifest, nil
|
||||
}
|
||||
|
||||
// Printf writes a formatted string to c.reportWriter.
|
||||
@@ -366,11 +389,26 @@ func (ic *imageCopier) updateEmbeddedDockerReference() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// shortDigest returns the first 12 characters of the digest.
|
||||
func shortDigest(d digest.Digest) string {
|
||||
return d.Encoded()[:12]
|
||||
}
|
||||
|
||||
// createProgressBar creates a pb.ProgressBar.
|
||||
func createProgressBar(srcInfo types.BlobInfo, kind string, writer io.Writer) *pb.ProgressBar {
|
||||
bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
|
||||
bar.SetMaxWidth(80)
|
||||
bar.ShowTimeLeft = false
|
||||
bar.ShowPercent = false
|
||||
bar.Prefix(fmt.Sprintf("Copying %s %s:", kind, shortDigest(srcInfo.Digest)))
|
||||
bar.Output = writer
|
||||
return bar
|
||||
}
|
||||
|
||||
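`createProgressBar` wires `gopkg.in/cheggaaa/pb.v1` bars into the copy pipeline via proxy readers. A rough standalone sketch of that proxy-reader pattern (the payload and sizes are made up):

```go
package main

import (
	"bytes"
	"io"
	"io/ioutil"

	pb "gopkg.in/cheggaaa/pb.v1"
)

func main() {
	payload := bytes.Repeat([]byte("x"), 1<<20) // pretend this is a layer blob

	bar := pb.New(len(payload)).SetUnits(pb.U_BYTES)
	bar.SetMaxWidth(80)
	bar.Start()
	defer bar.Finish()

	// The bar advances as the proxy reader is consumed, which is how
	// copyBlobFromStream reports progress while writing a blob.
	_, _ = io.Copy(ioutil.Discard, bar.NewProxyReader(bytes.NewReader(payload)))
}
```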
// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest.
|
||||
func (ic *imageCopier) copyLayers(ctx context.Context) error {
|
||||
srcInfos := ic.src.LayerInfos()
|
||||
destInfos := []types.BlobInfo{}
|
||||
diffIDs := []digest.Digest{}
|
||||
numLayers := len(srcInfos)
|
||||
updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -383,30 +421,83 @@ func (ic *imageCopier) copyLayers(ctx context.Context) error {
|
||||
srcInfos = updatedSrcInfos
|
||||
srcInfosUpdated = true
|
||||
}
|
||||
for _, srcLayer := range srcInfos {
|
||||
var (
|
||||
destInfo types.BlobInfo
|
||||
diffID digest.Digest
|
||||
err error
|
||||
)
|
||||
|
||||
type copyLayerData struct {
|
||||
destInfo types.BlobInfo
|
||||
diffID digest.Digest
|
||||
err error
|
||||
}
|
||||
|
||||
// copyGroup is used to determine if all layers are copied
|
||||
copyGroup := sync.WaitGroup{}
|
||||
copyGroup.Add(numLayers)
|
||||
// copySemaphore is used to limit the number of parallel downloads to
|
||||
// avoid malicious images causing troubles and to be nice to servers.
|
||||
var copySemaphore *semaphore.Weighted
|
||||
if ic.c.copyInParallel {
|
||||
copySemaphore = semaphore.NewWeighted(int64(maxParallelDownloads))
|
||||
} else {
|
||||
copySemaphore = semaphore.NewWeighted(int64(1))
|
||||
}
|
||||
|
||||
data := make([]copyLayerData, numLayers)
|
||||
copyLayerHelper := func(index int, srcLayer types.BlobInfo, bar *pb.ProgressBar) {
|
||||
defer bar.Finish()
|
||||
defer copySemaphore.Release(1)
|
||||
defer copyGroup.Done()
|
||||
cld := copyLayerData{}
|
||||
if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
|
||||
// DiffIDs are, currently, needed only when converting from schema1.
|
||||
// In which case src.LayerInfos will not have URLs because schema1
|
||||
// does not support them.
|
||||
if ic.diffIDsAreNeeded {
|
||||
return errors.New("getting DiffID for foreign layers is unimplemented")
|
||||
cld.err = errors.New("getting DiffID for foreign layers is unimplemented")
|
||||
bar.Prefix(fmt.Sprintf("Skipping blob %s (DiffID foreign layer unimplemented):", shortDigest(srcLayer.Digest)))
|
||||
bar.Finish()
|
||||
} else {
|
||||
cld.destInfo = srcLayer
|
||||
logrus.Debugf("Skipping foreign layer %q copy to %s\n", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
|
||||
bar.Prefix(fmt.Sprintf("Skipping blob %s (foreign layer):", shortDigest(srcLayer.Digest)))
|
||||
bar.Add64(bar.Total)
|
||||
bar.Finish()
|
||||
}
|
||||
destInfo = srcLayer
|
||||
ic.c.Printf("Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.c.dest.Reference().Transport().Name())
|
||||
} else {
|
||||
destInfo, diffID, err = ic.copyLayer(ctx, srcLayer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, bar)
|
||||
}
|
||||
destInfos = append(destInfos, destInfo)
|
||||
diffIDs = append(diffIDs, diffID)
|
||||
data[index] = cld
|
||||
}
|
||||
|
||||
progressBars := make([]*pb.ProgressBar, numLayers)
|
||||
for i, srcInfo := range srcInfos {
|
||||
bar := createProgressBar(srcInfo, "blob", nil)
|
||||
progressBars[i] = bar
|
||||
}
|
||||
|
||||
progressPool := pb.NewPool(progressBars...)
|
||||
progressPool.Output = ic.c.reportWriter
|
||||
if err := progressPool.Start(); err != nil {
|
||||
return errors.Wrapf(err, "error creating progress-bar pool")
|
||||
}
|
||||
|
||||
for i, srcLayer := range srcInfos {
|
||||
copySemaphore.Acquire(ctx, 1)
|
||||
go copyLayerHelper(i, srcLayer, progressBars[i])
|
||||
}
|
||||
|
||||
destInfos := make([]types.BlobInfo, numLayers)
|
||||
diffIDs := make([]digest.Digest, numLayers)
|
||||
|
||||
copyGroup.Wait()
|
||||
progressPool.Stop()
|
||||
|
||||
for i, cld := range data {
|
||||
if cld.err != nil {
|
||||
return cld.err
|
||||
}
|
||||
destInfos[i] = cld.destInfo
|
||||
diffIDs[i] = cld.diffID
|
||||
}
|
||||
|
||||
ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
|
||||
if ic.diffIDsAreNeeded {
|
||||
ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
|
||||
@@ -473,12 +564,14 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context) ([]byte
|
||||
func (c *copier) copyConfig(ctx context.Context, src types.Image) error {
|
||||
srcInfo := src.ConfigInfo()
|
||||
if srcInfo.Digest != "" {
|
||||
c.Printf("Copying config %s\n", srcInfo.Digest)
|
||||
configBlob, err := src.ConfigBlob(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
|
||||
}
|
||||
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true)
|
||||
bar := createProgressBar(srcInfo, "config", c.reportWriter)
|
||||
defer bar.Finish()
|
||||
bar.Start()
|
||||
destInfo, err := c.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, false, true, bar)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -498,57 +591,53 @@ type diffIDResult struct {
|
||||
|
||||
// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
|
||||
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
|
||||
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) {
|
||||
// Check if we already have a blob with this digest
|
||||
haveBlob, extantBlobSize, err := ic.c.dest.HasBlob(ctx, srcInfo)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest)
|
||||
}
|
||||
// If we already have a cached diffID for this blob, we don't need to compute it
|
||||
diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.c.cachedDiffIDs[srcInfo.Digest] == "")
|
||||
// If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again
|
||||
if haveBlob && !diffIDIsNeeded {
|
||||
// Check the blob sizes match, if we were given a size this time
|
||||
if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize {
|
||||
return types.BlobInfo{}, "", errors.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size)
|
||||
}
|
||||
srcInfo.Size = extantBlobSize
|
||||
// Tell the image destination that this blob's delta is being applied again. For some image destinations, this can be faster than using GetBlob/PutBlob
|
||||
blobinfo, err := ic.c.dest.ReapplyBlob(ctx, srcInfo)
|
||||
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, bar *pb.ProgressBar) (types.BlobInfo, digest.Digest, error) {
|
||||
cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
|
||||
diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
|
||||
|
||||
// If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
|
||||
if !diffIDIsNeeded {
|
||||
reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest)
|
||||
return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
|
||||
}
|
||||
if reused {
|
||||
bar.Prefix(fmt.Sprintf("Skipping blob %s (already present):", shortDigest(srcInfo.Digest)))
|
||||
bar.Add64(bar.Total)
|
||||
bar.Finish()
|
||||
return blobInfo, cachedDiffID, nil
|
||||
}
|
||||
ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest)
|
||||
return blobinfo, ic.c.cachedDiffIDs[srcInfo.Digest], err
|
||||
}
|
||||
|
||||
// Fallback: copy the layer, computing the diffID if we need to do so
|
||||
ic.c.Printf("Copying blob %s\n", srcInfo.Digest)
|
||||
srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo)
|
||||
srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
|
||||
}
|
||||
defer srcStream.Close()
|
||||
|
||||
blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize},
|
||||
diffIDIsNeeded)
|
||||
diffIDIsNeeded, bar)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", err
|
||||
}
|
||||
var diffIDResult diffIDResult // = {digest:""}
|
||||
if diffIDIsNeeded {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return types.BlobInfo{}, "", ctx.Err()
|
||||
case diffIDResult = <-diffIDChan:
|
||||
case diffIDResult := <-diffIDChan:
|
||||
if diffIDResult.err != nil {
|
||||
return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
|
||||
}
|
||||
logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
|
||||
ic.c.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
|
||||
// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
|
||||
// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
|
||||
ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
|
||||
return blobInfo, diffIDResult.digest, nil
|
||||
}
|
||||
} else {
|
||||
return blobInfo, cachedDiffID, nil
|
||||
}
|
||||
return blobInfo, diffIDResult.digest, nil
|
||||
}
|
||||
|
||||
// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
|
||||
@@ -556,7 +645,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t
|
||||
// perhaps compressing the stream if canCompress,
|
||||
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
|
||||
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
|
||||
diffIDIsNeeded bool) (types.BlobInfo, <-chan diffIDResult, error) {
|
||||
diffIDIsNeeded bool, bar *pb.ProgressBar) (types.BlobInfo, <-chan diffIDResult, error) {
|
||||
var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
|
||||
var diffIDChan chan diffIDResult
|
||||
|
||||
@@ -580,7 +669,7 @@ func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Rea
|
||||
return pipeWriter
|
||||
}
|
||||
}
|
||||
blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false) // Sets err to nil on success
|
||||
blobInfo, err := ic.c.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest, false, bar) // Sets err to nil on success
|
||||
return blobInfo, diffIDChan, err
|
||||
// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
|
||||
}
|
||||
@@ -617,14 +706,14 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc)
|
||||
// and returns a complete blobInfo of the copied blob.
|
||||
func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
|
||||
getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
|
||||
canModifyBlob bool, isConfig bool) (types.BlobInfo, error) {
|
||||
canModifyBlob bool, isConfig bool, bar *pb.ProgressBar) (types.BlobInfo, error) {
|
||||
// The copying happens through a pipeline of connected io.Readers.
|
||||
// === Input: srcStream
|
||||
|
||||
// === Process input through digestingReader to validate against the expected digest.
|
||||
// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
|
||||
// use a separate validation failure indicator.
|
||||
// Note that we don't use a stronger "validationSucceeded" indicator, because
|
||||
// Note that for this check we don't use the stronger "validationSucceeded" indicator, because
|
||||
// dest.PutBlob may detect that the layer already exists, in which case we don't
|
||||
// read stream to the end, and validation does not happen.
|
||||
digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
|
||||
@@ -640,16 +729,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
|
||||
}
|
||||
isCompressed := decompressor != nil
|
||||
|
||||
// === Report progress using a pb.Reader.
|
||||
bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
|
||||
bar.Output = c.reportWriter
|
||||
bar.SetMaxWidth(80)
|
||||
bar.ShowTimeLeft = false
|
||||
bar.ShowPercent = false
|
||||
bar.Start()
|
||||
destStream = bar.NewProxyReader(destStream)
|
||||
defer bar.Finish()
|
||||
|
||||
// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
|
||||
var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
|
||||
@@ -660,8 +740,10 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
|
||||
// === Deal with layer compression/decompression if necessary
|
||||
var inputInfo types.BlobInfo
|
||||
var compressionOperation types.LayerCompression
|
||||
if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
|
||||
logrus.Debugf("Compressing blob on the fly")
|
||||
compressionOperation = types.Compress
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
defer pipeReader.Close()
|
||||
|
||||
@@ -674,6 +756,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
inputInfo.Size = -1
|
||||
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
|
||||
logrus.Debugf("Blob will be decompressed")
|
||||
compressionOperation = types.Decompress
|
||||
s, err := decompressor(destStream)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
@@ -684,6 +767,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
inputInfo.Size = -1
|
||||
} else {
|
||||
logrus.Debugf("Using original blob without modification")
|
||||
compressionOperation = types.PreserveOriginal
|
||||
inputInfo = srcInfo
|
||||
}
|
||||
|
||||
@@ -699,7 +783,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
}
|
||||
|
||||
// === Finally, send the layer stream to dest.
|
||||
uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, isConfig)
|
||||
uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
|
||||
}
|
||||
@@ -722,6 +806,22 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
|
||||
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
|
||||
}
|
||||
if digestingReader.validationSucceeded {
|
||||
// If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
|
||||
// srcInfo.Digest describes the pre-compressionOperation input, verified by digestingReader
|
||||
// uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
|
||||
// (because inputInfo.Digest == "", this must have been computed afresh).
|
||||
switch compressionOperation {
|
||||
case types.PreserveOriginal:
|
||||
break // Do nothing, we have only one digest and we might not have even verified it.
|
||||
case types.Compress:
|
||||
c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
|
||||
case types.Decompress:
|
||||
c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
|
||||
default:
|
||||
return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
|
||||
}
|
||||
}
|
||||
return uploadedInfo, nil
|
||||
}
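// Illustrative sketch (not from the commits above): what the RecordDigestUncompressedPair
// calls in copyBlobFromStream buy us. Once a copy has seen both the compressed digest and the
// uncompressed DiffID of a layer, a BlobInfoCache can answer the mapping in either direction,
// which is what later allows TryReusingBlob to substitute an equivalent blob. The in-memory
// cache constructor and the digest values below are assumptions made only for this example.
package main

import (
	"fmt"

	"github.com/containers/image/pkg/blobinfocache"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	cache := blobinfocache.NewMemoryCache() // assumed in-memory types.BlobInfoCache implementation

	gzipped := digest.Digest("sha256:1111111111111111111111111111111111111111111111111111111111111111")
	diffID := digest.Digest("sha256:2222222222222222222222222222222222222222222222222222222222222222")

	// Mirrors the types.Compress branch above: compressed digest -> uncompressed digest.
	cache.RecordDigestUncompressedPair(gzipped, diffID)

	// A later copy of the same layer can now skip recomputing the DiffID.
	fmt.Println(cache.UncompressedDigest(gzipped) == diffID) // true
}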
|
||||
|
||||
@@ -732,7 +832,7 @@ func compressGoroutine(dest *io.PipeWriter, src io.Reader) {
|
||||
dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
|
||||
}()
|
||||
|
||||
zipper := gzip.NewWriter(dest)
|
||||
zipper := pgzip.NewWriter(dest)
|
||||
defer zipper.Close()
|
||||
|
||||
_, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close()
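// Illustrative sketch (not from the commits above): pgzip is a drop-in replacement for
// compress/gzip that compresses independent blocks on multiple goroutines, which is why the
// gzip -> pgzip swap in compressGoroutine is a one-line change. The block size and concurrency
// values below are illustrative assumptions, not what this code necessarily uses.
package main

import (
	"io"
	"os"

	"github.com/klauspost/pgzip"
)

func compress(dst io.Writer, src io.Reader) error {
	zw := pgzip.NewWriter(dst)
	// Optional tuning: 1 MiB blocks, up to 4 blocks compressed in parallel.
	if err := zw.SetConcurrency(1<<20, 4); err != nil {
		return err
	}
	if _, err := io.Copy(zw, src); err != nil {
		zw.Close()
		return err
	}
	return zw.Close()
}

func main() {
	_ = compress(os.Stdout, os.Stdin)
}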
|
||||
|
||||
vendor/github.com/containers/image/directory/directory_dest.go (generated, vendored): 32 lines changed
@@ -124,13 +124,19 @@ func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
|
||||
return false // N/A, DockerReference() returns nil.
|
||||
}
|
||||
|
||||
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
|
||||
func (d *dirImageDestination) HasThreadSafePutBlob() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
|
||||
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
|
||||
// inputInfo.Size is the expected length of stream, if known.
|
||||
// May update cache.
|
||||
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||
// to any other readers for download using the supplied digest.
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
|
||||
func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
|
||||
blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
@@ -169,27 +175,27 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
|
||||
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
|
||||
}
|
||||
|
||||
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
|
||||
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
|
||||
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
|
||||
// it returns a non-nil error only on an unexpected failure.
|
||||
func (d *dirImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
|
||||
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
|
||||
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
|
||||
// info.Digest must not be empty.
|
||||
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
|
||||
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
|
||||
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
|
||||
// May use and/or update cache.
|
||||
func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
|
||||
if info.Digest == "" {
|
||||
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
|
||||
return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
|
||||
}
|
||||
blobPath := d.ref.layerPath(info.Digest)
|
||||
finfo, err := os.Stat(blobPath)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
return false, -1, nil
|
||||
return false, types.BlobInfo{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, -1, err
|
||||
return false, types.BlobInfo{}, err
|
||||
}
|
||||
return true, finfo.Size(), nil
|
||||
}
|
||||
return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
|
||||
|
||||
func (d *dirImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
|
||||
return info, nil
|
||||
}
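// Illustrative sketch (not from the commits above): the calling pattern the new
// TryReusingBlob/PutBlob pair is designed for. A copier first asks the destination whether the
// blob can be reused (consulting the BlobInfoCache), and only streams the data via PutBlob when
// it cannot. The dest, stream and blob inputs here are hypothetical caller-supplied values.
package copysketch

import (
	"context"
	"io"

	"github.com/containers/image/pkg/blobinfocache"
	"github.com/containers/image/types"
)

func copyBlob(ctx context.Context, dest types.ImageDestination, stream io.Reader, blob types.BlobInfo) (types.BlobInfo, error) {
	cache := blobinfocache.NewMemoryCache() // any types.BlobInfoCache implementation works here

	reused, reusedInfo, err := dest.TryReusingBlob(ctx, blob, cache, true)
	if err != nil {
		return types.BlobInfo{}, err
	}
	if reused {
		return reusedInfo, nil // no bytes were transferred
	}
	// Fall back to uploading the blob; the destination may record its new location in the cache.
	return dest.PutBlob(ctx, stream, blob, cache, false)
}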
|
||||
|
||||
// PutManifest writes manifest to the destination.
|
||||
|
||||
vendor/github.com/containers/image/directory/directory_src.go (generated, vendored): 9 lines changed
@@ -48,8 +48,15 @@ func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest
|
||||
return m, manifest.GuessMIMEType(m), err
|
||||
}
|
||||
|
||||
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
|
||||
func (s *dirImageSource) HasThreadSafeGetBlob() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
|
||||
func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
|
||||
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
|
||||
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
|
||||
func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
|
||||
r, err := os.Open(s.ref.layerPath(info.Digest))
|
||||
if err != nil {
|
||||
return nil, -1, err
|
||||
|
||||
vendor/github.com/containers/image/docker/cache.go (new file, generated, vendored): 23 lines added
@@ -0,0 +1,23 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/containers/image/types"
|
||||
)
|
||||
|
||||
// bicTransportScope returns a BICTransportScope appropriate for ref.
|
||||
func bicTransportScope(ref dockerReference) types.BICTransportScope {
|
||||
// Blobs can be reused across the whole registry.
|
||||
return types.BICTransportScope{Opaque: reference.Domain(ref.ref)}
|
||||
}
|
||||
|
||||
// newBICLocationReference returns a BICLocationReference appropriate for ref.
|
||||
func newBICLocationReference(ref dockerReference) types.BICLocationReference {
|
||||
// Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob).
|
||||
return types.BICLocationReference{Opaque: ref.ref.Name()}
|
||||
}
|
||||
|
||||
// parseBICLocationReference returns a repository for encoded lr.
|
||||
func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) {
|
||||
return reference.ParseNormalizedNamed(lr.Opaque)
|
||||
}
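// Illustrative sketch (not from the commits above): how these three helpers are meant to be
// combined. bicTransportScope scopes cache entries to one registry, newBICLocationReference
// names the repository a blob is known to live in, and CandidateLocations later returns that
// repository as a reuse/mount candidate. The registry, repository and digest below are
// illustrative assumptions.
package cachesketch

import (
	"github.com/containers/image/docker"
	"github.com/containers/image/pkg/blobinfocache"
	"github.com/containers/image/types"
	digest "github.com/opencontainers/go-digest"
)

func rememberAndLookUp(layer digest.Digest) []types.BICReplacementCandidate {
	cache := blobinfocache.NewMemoryCache()
	scope := types.BICTransportScope{Opaque: "registry.example.com"}                    // one scope per registry
	location := types.BICLocationReference{Opaque: "registry.example.com/project/repo"} // one repository

	// Typically recorded after a successful upload or HEAD check against that repository.
	cache.RecordKnownLocation(docker.Transport, scope, layer, location)

	// Later, another copy to the same registry can ask for places to reuse or mount the blob from.
	return cache.CandidateLocations(docker.Transport, scope, layer, true)
}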
|
||||
vendor/github.com/containers/image/docker/docker_client.go (generated, vendored): 263 lines changed
@@ -13,10 +13,12 @@ import (
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/containers/image/pkg/docker/config"
|
||||
"github.com/containers/image/pkg/sysregistriesv2"
|
||||
"github.com/containers/image/pkg/tlsclientconfig"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/docker/distribution/registry/client"
|
||||
@@ -69,30 +71,39 @@ type extensionSignatureList struct {
|
||||
}
|
||||
|
||||
type bearerToken struct {
|
||||
Token string `json:"token"`
|
||||
AccessToken string `json:"access_token"`
|
||||
ExpiresIn int `json:"expires_in"`
|
||||
IssuedAt time.Time `json:"issued_at"`
|
||||
Token string `json:"token"`
|
||||
AccessToken string `json:"access_token"`
|
||||
ExpiresIn int `json:"expires_in"`
|
||||
IssuedAt time.Time `json:"issued_at"`
|
||||
expirationTime time.Time
|
||||
}
|
||||
|
||||
// dockerClient is configuration for dealing with a single Docker registry.
|
||||
type dockerClient struct {
|
||||
// The following members are set by newDockerClient and do not change afterwards.
|
||||
sys *types.SystemContext
|
||||
registry string
|
||||
sys *types.SystemContext
|
||||
registry string
|
||||
client *http.Client
|
||||
insecureSkipTLSVerify bool
|
||||
|
||||
// The following members are not set by newDockerClient and must be set by callers if needed.
|
||||
username string
|
||||
password string
|
||||
client *http.Client
|
||||
signatureBase signatureStorageBase
|
||||
scope authScope
|
||||
extraScope *authScope // If non-nil, a temporary extra token scope (necessary for mounting from another repo)
|
||||
// The following members are detected registry properties:
|
||||
// They are set after a successful detectProperties(), and never change afterwards.
|
||||
scheme string // Empty value also used to indicate detectProperties() has not yet succeeded.
|
||||
challenges []challenge
|
||||
supportsSignatures bool
|
||||
// The following members are private state for setupRequestAuth, both are valid if token != nil.
|
||||
token *bearerToken
|
||||
tokenExpiration time.Time
|
||||
|
||||
// Private state for setupRequestAuth (key: string, value: bearerToken)
|
||||
tokenCache sync.Map
|
||||
// detectPropertiesError caches the initial error.
|
||||
detectPropertiesError error
|
||||
// detectPropertiesOnce is used to execute detectProperties() at most once in makeRequest().
|
||||
detectPropertiesOnce sync.Once
|
||||
}
|
||||
|
||||
type authScope struct {
|
||||
@@ -100,6 +111,19 @@ type authScope struct {
|
||||
actions string
|
||||
}
|
||||
|
||||
// sendAuth determines whether we need authentication for v2 or v1 endpoint.
|
||||
type sendAuth int
|
||||
|
||||
const (
|
||||
// v2 endpoint with authentication.
|
||||
v2Auth sendAuth = iota
|
||||
// v1 endpoint with authentication.
|
||||
// TODO: Get v1Auth working
|
||||
// v1Auth
|
||||
// no authentication, works for both v1 and v2.
|
||||
noAuth
|
||||
)
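// Illustrative sketch (not from the commits above): the point of replacing the old
// "sendAuth bool" parameter with this small enum is readability at call sites and room for a
// future v1Auth mode. A generic illustration of the same pattern; the names here are made up
// and are not part of the library.
package authsketch

type authKind int

const (
	withAuth    authKind = iota // like v2Auth above
	withoutAuth                 // like noAuth above: pings and pre-resolved external blob URLs
)

func doRequest(kind authKind) {
	if kind == withAuth {
		// attach the Authorization header / bearer token here
	}
	// ... issue the request ...
}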
|
||||
|
||||
func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
|
||||
token := new(bearerToken)
|
||||
if err := json.Unmarshal(blob, &token); err != nil {
|
||||
@@ -115,6 +139,7 @@ func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
|
||||
if token.IssuedAt.IsZero() {
|
||||
token.IssuedAt = time.Now().UTC()
|
||||
}
|
||||
token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
|
||||
return token, nil
|
||||
}
|
||||
|
||||
@@ -181,13 +206,26 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, write
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
remoteName := reference.Path(ref.ref)
|
||||
|
||||
return newDockerClientWithDetails(sys, registry, username, password, actions, sigBase, remoteName)
|
||||
client, err := newDockerClient(sys, registry, ref.ref.Name())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client.username = username
|
||||
client.password = password
|
||||
client.signatureBase = sigBase
|
||||
client.scope.actions = actions
|
||||
client.scope.remoteName = reference.Path(ref.ref)
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// newDockerClientWithDetails returns a new dockerClient instance for the given parameters
|
||||
func newDockerClientWithDetails(sys *types.SystemContext, registry, username, password, actions string, sigBase signatureStorageBase, remoteName string) (*dockerClient, error) {
|
||||
// newDockerClient returns a new dockerClient instance for the given registry
|
||||
// and reference. The reference is used to query the registry configuration
|
||||
// and can be either a registry (e.g., "registry.com[:5000]") or a repository
|
||||
// (e.g., "registry.com[:5000][/some/namespace]/repo").
|
||||
// Please note that newDockerClient does not set all members of dockerClient
|
||||
// (e.g., username and password); those must be set by callers if necessary.
|
||||
func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) {
|
||||
hostName := registry
|
||||
if registry == dockerHostname {
|
||||
registry = dockerRegistry
|
||||
@@ -208,33 +246,43 @@ func newDockerClientWithDetails(sys *types.SystemContext, registry, username, pa
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if sys != nil && sys.DockerInsecureSkipTLSVerify {
|
||||
tr.TLSClientConfig.InsecureSkipVerify = true
|
||||
// Check if TLS verification shall be skipped (default=false) which can
|
||||
// either be specified in the sysregistriesv2 configuration or via the
|
||||
// SystemContext, whereas the SystemContext is prioritized.
|
||||
skipVerify := false
|
||||
if sys != nil && sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined {
|
||||
// Only use the SystemContext if the actual value is defined.
|
||||
skipVerify = sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue
|
||||
} else {
|
||||
reg, err := sysregistriesv2.FindRegistry(sys, reference)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error loading registries")
|
||||
}
|
||||
if reg != nil {
|
||||
skipVerify = reg.Insecure
|
||||
}
|
||||
}
|
||||
tr.TLSClientConfig.InsecureSkipVerify = skipVerify
|
||||
|
||||
return &dockerClient{
|
||||
sys: sys,
|
||||
registry: registry,
|
||||
username: username,
|
||||
password: password,
|
||||
client: &http.Client{Transport: tr},
|
||||
signatureBase: sigBase,
|
||||
scope: authScope{
|
||||
actions: actions,
|
||||
remoteName: remoteName,
|
||||
},
|
||||
sys: sys,
|
||||
registry: registry,
|
||||
client: &http.Client{Transport: tr},
|
||||
insecureSkipTLSVerify: skipVerify,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CheckAuth validates the credentials by attempting to log into the registry
|
||||
// returns an error if an error occurred while making the http request or the status code received was 401
|
||||
func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error {
|
||||
newLoginClient, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "")
|
||||
client, err := newDockerClient(sys, registry, registry)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error creating new docker client")
|
||||
}
|
||||
client.username = username
|
||||
client.password = password
|
||||
|
||||
resp, err := newLoginClient.makeRequest(ctx, "GET", "/v2/", nil, nil)
|
||||
resp, err := client.makeRequest(ctx, "GET", "/v2/", nil, nil, v2Auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -246,7 +294,7 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password
|
||||
case http.StatusUnauthorized:
|
||||
return ErrUnauthorizedForCredentials
|
||||
default:
|
||||
return errors.Errorf("error occured with status code %q", resp.StatusCode)
|
||||
return errors.Errorf("error occured with status code %d (%s)", resp.StatusCode, http.StatusText(resp.StatusCode))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -286,25 +334,59 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
|
||||
return nil, errors.Wrapf(err, "error getting username and password")
|
||||
}
|
||||
|
||||
// The /v2/_catalog endpoint has been disabled for docker.io therefore the call made to that endpoint will fail
|
||||
// So using the v1 hostname for docker.io for simplicity of implementation and the fact that it returns search results
|
||||
// The /v2/_catalog endpoint has been disabled for docker.io therefore
|
||||
// the call made to that endpoint will fail. So using the v1 hostname
|
||||
// for docker.io for simplicity of implementation and the fact that it
|
||||
// returns search results.
|
||||
hostname := registry
|
||||
if registry == dockerHostname {
|
||||
registry = dockerV1Hostname
|
||||
hostname = dockerV1Hostname
|
||||
}
|
||||
|
||||
client, err := newDockerClientWithDetails(sys, registry, username, password, "", nil, "")
|
||||
client, err := newDockerClient(sys, hostname, registry)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error creating new docker client")
|
||||
}
|
||||
client.username = username
|
||||
client.password = password
|
||||
|
||||
// Only try the v1 search endpoint if the search query is not empty. If it is
|
||||
// empty skip to the v2 endpoint.
|
||||
if image != "" {
|
||||
// set up the query values for the v1 endpoint
|
||||
u := url.URL{
|
||||
Path: "/v1/search",
|
||||
}
|
||||
q := u.Query()
|
||||
q.Set("q", image)
|
||||
q.Set("n", strconv.Itoa(limit))
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
logrus.Debugf("trying to talk to v1 search endpoint\n")
|
||||
resp, err := client.makeRequest(ctx, "GET", u.String(), nil, nil, noAuth)
|
||||
if err != nil {
|
||||
logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
|
||||
} else {
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
logrus.Debugf("error getting search results from v1 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode))
|
||||
} else {
|
||||
if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v1Res.Results, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Debugf("trying to talk to v2 search endpoint\n")
|
||||
resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil)
|
||||
resp, err := client.makeRequest(ctx, "GET", "/v2/_catalog", nil, nil, v2Auth)
|
||||
if err != nil {
|
||||
logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err)
|
||||
} else {
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
logrus.Debugf("error getting search results from v2 endpoint %q, status code %q", registry, resp.StatusCode)
|
||||
logrus.Errorf("error getting search results from v2 endpoint %q, status code %d (%s)", registry, resp.StatusCode, http.StatusText(resp.StatusCode))
|
||||
} else {
|
||||
if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil {
|
||||
return nil, err
|
||||
@@ -322,50 +404,25 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
|
||||
}
|
||||
}
|
||||
|
||||
// set up the query values for the v1 endpoint
|
||||
u := url.URL{
|
||||
Path: "/v1/search",
|
||||
}
|
||||
q := u.Query()
|
||||
q.Set("q", image)
|
||||
q.Set("n", strconv.Itoa(limit))
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
logrus.Debugf("trying to talk to v1 search endpoint\n")
|
||||
resp, err = client.makeRequest(ctx, "GET", u.String(), nil, nil)
|
||||
if err != nil {
|
||||
logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err)
|
||||
} else {
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
logrus.Debugf("error getting search results from v1 endpoint %q, status code %q", registry, resp.StatusCode)
|
||||
} else {
|
||||
if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v1Res.Results, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.Wrapf(err, "couldn't search registry %q", registry)
|
||||
}
|
||||
|
||||
// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
|
||||
// The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
|
||||
func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader) (*http.Response, error) {
|
||||
func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth) (*http.Response, error) {
|
||||
if err := c.detectProperties(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
|
||||
return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, true)
|
||||
return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, auth)
|
||||
}
|
||||
|
||||
// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
|
||||
// streamLen, if not -1, specifies the length of the data expected on stream.
|
||||
// makeRequest should generally be preferred.
|
||||
// TODO(runcom): too many arguments here, use a struct
|
||||
func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, sendAuth bool) (*http.Response, error) {
|
||||
func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth) (*http.Response, error) {
|
||||
req, err := http.NewRequest(method, url, stream)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -383,7 +440,7 @@ func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url
|
||||
if c.sys != nil && c.sys.DockerRegistryUserAgent != "" {
|
||||
req.Header.Add("User-Agent", c.sys.DockerRegistryUserAgent)
|
||||
}
|
||||
if sendAuth {
|
||||
if auth == v2Auth {
|
||||
if err := c.setupRequestAuth(req); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -415,24 +472,27 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
|
||||
req.SetBasicAuth(c.username, c.password)
|
||||
return nil
|
||||
case "bearer":
|
||||
if c.token == nil || time.Now().After(c.tokenExpiration) {
|
||||
realm, ok := challenge.Parameters["realm"]
|
||||
if !ok {
|
||||
return errors.Errorf("missing realm in bearer auth challenge")
|
||||
}
|
||||
service, _ := challenge.Parameters["service"] // Will be "" if not present
|
||||
var scope string
|
||||
if c.scope.remoteName != "" && c.scope.actions != "" {
|
||||
scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
|
||||
}
|
||||
token, err := c.getBearerToken(req.Context(), realm, service, scope)
|
||||
cacheKey := ""
|
||||
scopes := []authScope{c.scope}
|
||||
if c.extraScope != nil {
|
||||
// Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
|
||||
cacheKey = fmt.Sprintf("%s:%s", c.extraScope.remoteName, c.extraScope.actions)
|
||||
scopes = append(scopes, *c.extraScope)
|
||||
}
|
||||
var token bearerToken
|
||||
t, inCache := c.tokenCache.Load(cacheKey)
|
||||
if inCache {
|
||||
token = t.(bearerToken)
|
||||
}
|
||||
if !inCache || time.Now().After(token.expirationTime) {
|
||||
t, err := c.getBearerToken(req.Context(), challenge, scopes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.token = token
|
||||
c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
|
||||
token = *t
|
||||
c.tokenCache.Store(cacheKey, token)
|
||||
}
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token))
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token))
|
||||
return nil
|
||||
default:
|
||||
logrus.Debugf("no handler for %s authentication", challenge.Scheme)
|
||||
@@ -442,7 +502,12 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) {
|
||||
func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, scopes []authScope) (*bearerToken, error) {
|
||||
realm, ok := challenge.Parameters["realm"]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("missing realm in bearer auth challenge")
|
||||
}
|
||||
|
||||
authReq, err := http.NewRequest("GET", realm, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -452,16 +517,19 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
|
||||
if c.username != "" {
|
||||
getParams.Add("account", c.username)
|
||||
}
|
||||
if service != "" {
|
||||
if service, ok := challenge.Parameters["service"]; ok && service != "" {
|
||||
getParams.Add("service", service)
|
||||
}
|
||||
if scope != "" {
|
||||
getParams.Add("scope", scope)
|
||||
for _, scope := range scopes {
|
||||
if scope.remoteName != "" && scope.actions != "" {
|
||||
getParams.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
|
||||
}
|
||||
}
|
||||
authReq.URL.RawQuery = getParams.Encode()
|
||||
if c.username != "" && c.password != "" {
|
||||
authReq.SetBasicAuth(c.username, c.password)
|
||||
}
|
||||
logrus.Debugf("%s %s", authReq.Method, authReq.URL.String())
|
||||
tr := tlsclientconfig.NewTransport()
|
||||
// TODO(runcom): insecure for now to contact the external token service
|
||||
tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
|
||||
@@ -477,7 +545,7 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
|
||||
case http.StatusOK:
|
||||
break
|
||||
default:
|
||||
return nil, errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL)
|
||||
return nil, errors.Errorf("unexpected http code: %d (%s), URL: %s", res.StatusCode, http.StatusText(res.StatusCode), authReq.URL)
|
||||
}
|
||||
tokenBlob, err := ioutil.ReadAll(res.Body)
|
||||
if err != nil {
|
||||
@@ -487,24 +555,24 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
|
||||
return newBearerTokenFromJSONBlob(tokenBlob)
|
||||
}
|
||||
|
||||
// detectProperties detects various properties of the registry.
|
||||
// See the dockerClient documentation for members which are affected by this.
|
||||
func (c *dockerClient) detectProperties(ctx context.Context) error {
|
||||
// detectPropertiesHelper performs the work of detectProperties which executes
|
||||
// it at most once.
|
||||
func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error {
|
||||
if c.scheme != "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
ping := func(scheme string) error {
|
||||
url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
|
||||
resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, true)
|
||||
logrus.Debugf("Ping %s err %#v", url, err)
|
||||
resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth)
|
||||
if err != nil {
|
||||
logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
|
||||
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
|
||||
return errors.Errorf("error pinging registry %s, response code %d", c.registry, resp.StatusCode)
|
||||
return errors.Errorf("error pinging registry %s, response code %d (%s)", c.registry, resp.StatusCode, http.StatusText(resp.StatusCode))
|
||||
}
|
||||
c.challenges = parseAuthHeader(resp.Header)
|
||||
c.scheme = scheme
|
||||
@@ -512,7 +580,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
err := ping("https")
|
||||
if err != nil && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify {
|
||||
if err != nil && c.insecureSkipTLSVerify {
|
||||
err = ping("http")
|
||||
}
|
||||
if err != nil {
|
||||
@@ -523,9 +591,9 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
|
||||
// best effort to understand if we're talking to a V1 registry
|
||||
pingV1 := func(scheme string) bool {
|
||||
url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
|
||||
resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, true)
|
||||
logrus.Debugf("Ping %s err %#v", url, err)
|
||||
resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth)
|
||||
if err != nil {
|
||||
logrus.Debugf("Ping %s err %s (%#v)", url, err.Error(), err)
|
||||
return false
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
@@ -536,7 +604,7 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
|
||||
return true
|
||||
}
|
||||
isV1 := pingV1("https")
|
||||
if !isV1 && c.sys != nil && c.sys.DockerInsecureSkipTLSVerify {
|
||||
if !isV1 && c.insecureSkipTLSVerify {
|
||||
isV1 = pingV1("http")
|
||||
}
|
||||
if isV1 {
|
||||
@@ -546,11 +614,18 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// detectProperties detects various properties of the registry.
|
||||
// See the dockerClient documentation for members which are affected by this.
|
||||
func (c *dockerClient) detectProperties(ctx context.Context) error {
|
||||
c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) })
|
||||
return c.detectPropertiesError
|
||||
}
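// Illustrative sketch (not from the commits above): the detectPropertiesOnce /
// detectPropertiesError pair above is the standard sync.Once idiom for "run an expensive,
// fallible probe at most once and replay its result to every caller". A generic illustration;
// the client type and probe body are assumptions for this example only.
package oncesketch

import (
	"context"
	"sync"
)

type client struct {
	once sync.Once
	err  error
}

func (c *client) probe(ctx context.Context) error {
	// expensive detection, e.g. pinging /v2/ over https and then falling back to http
	return nil
}

func (c *client) ensureProbed(ctx context.Context) error {
	c.once.Do(func() { c.err = c.probe(ctx) })
	return c.err // every caller, including concurrent ones, sees the same cached result
}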
|
||||
|
||||
// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
|
||||
// using the original data structures.
|
||||
func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
|
||||
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
|
||||
res, err := c.makeRequest(ctx, "GET", path, nil, nil)
|
||||
res, err := c.makeRequest(ctx, "GET", path, nil, nil, v2Auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
vendor/github.com/containers/image/docker/docker_image.go (generated, vendored): 4 lines changed
@@ -66,14 +66,14 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
|
||||
tags := make([]string, 0)
|
||||
|
||||
for {
|
||||
res, err := client.makeRequest(ctx, "GET", path, nil, nil)
|
||||
res, err := client.makeRequest(ctx, "GET", path, nil, nil, v2Auth)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != http.StatusOK {
|
||||
// print url also
|
||||
return nil, errors.Errorf("Invalid status code returned when fetching tags list %d", res.StatusCode)
|
||||
return nil, errors.Errorf("Invalid status code returned when fetching tags list %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
|
||||
}
|
||||
|
||||
var tagsHolder struct {
|
||||
|
||||
vendor/github.com/containers/image/docker/docker_image_dest.go (generated, vendored): 178 lines changed
@@ -15,6 +15,7 @@ import (
|
||||
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/containers/image/manifest"
|
||||
"github.com/containers/image/pkg/blobinfocache"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/docker/distribution/registry/api/errcode"
|
||||
"github.com/docker/distribution/registry/api/v2"
|
||||
@@ -110,27 +111,36 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
|
||||
return len(p), nil
|
||||
}
|
||||
|
||||
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
|
||||
func (d *dockerImageDestination) HasThreadSafePutBlob() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
|
||||
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
|
||||
// inputInfo.Size is the expected length of stream, if known.
|
||||
// May update cache.
|
||||
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||
// to any other readers for download using the supplied digest.
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
|
||||
func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
|
||||
if inputInfo.Digest.String() != "" {
|
||||
haveBlob, size, err := d.HasBlob(ctx, inputInfo)
|
||||
// This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
|
||||
// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
|
||||
// But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_.
|
||||
haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, blobinfocache.NoCache, false)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
}
|
||||
if haveBlob {
|
||||
return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
|
||||
return reusedInfo, nil
|
||||
}
|
||||
}
|
||||
|
||||
// FIXME? Chunked upload, progress reporting, etc.
|
||||
uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
|
||||
logrus.Debugf("Uploading %s", uploadPath)
|
||||
res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil)
|
||||
res, err := d.c.makeRequest(ctx, "POST", uploadPath, nil, nil, v2Auth)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
}
|
||||
@@ -147,7 +157,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
|
||||
digester := digest.Canonical.Digester()
|
||||
sizeCounter := &sizeCounter{}
|
||||
tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
|
||||
res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, true)
|
||||
res, err = d.c.makeRequestToResolvedURL(ctx, "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, v2Auth)
|
||||
if err != nil {
|
||||
logrus.Debugf("Error uploading layer chunked, response %#v", res)
|
||||
return types.BlobInfo{}, err
|
||||
@@ -160,13 +170,13 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
|
||||
return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
|
||||
}
|
||||
|
||||
// FIXME: DELETE uploadLocation on failure
|
||||
// FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope)
|
||||
|
||||
locationQuery := uploadLocation.Query()
|
||||
// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
|
||||
locationQuery.Set("digest", computedDigest.String())
|
||||
uploadLocation.RawQuery = locationQuery.Encode()
|
||||
res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, true)
|
||||
res, err = d.c.makeRequestToResolvedURL(ctx, "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
}
|
||||
@@ -177,21 +187,17 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
|
||||
}
|
||||
|
||||
logrus.Debugf("Upload of layer %s complete", computedDigest)
|
||||
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), computedDigest, newBICLocationReference(d.ref))
|
||||
return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
|
||||
}
|
||||
|
||||
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
|
||||
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
|
||||
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
|
||||
// blobExists returns true iff repo contains a blob with digest, and if so, also its size.
|
||||
// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil);
|
||||
// it returns a non-nil error only on an unexpected failure.
|
||||
func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
|
||||
if info.Digest == "" {
|
||||
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
|
||||
}
|
||||
checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())
|
||||
|
||||
func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest) (bool, int64, error) {
|
||||
checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
|
||||
logrus.Debugf("Checking %s", checkPath)
|
||||
res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil)
|
||||
res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth)
|
||||
if err != nil {
|
||||
return false, -1, err
|
||||
}
|
||||
@@ -202,17 +208,143 @@ func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInf
|
||||
return true, getBlobSize(res), nil
|
||||
case http.StatusUnauthorized:
|
||||
logrus.Debugf("... not authorized")
|
||||
return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", info.Digest, d.ref.ref.Name())
|
||||
return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
|
||||
case http.StatusNotFound:
|
||||
logrus.Debugf("... not present")
|
||||
return false, -1, nil
|
||||
default:
|
||||
return false, -1, errors.Errorf("failed to read from destination repository %s: %v", reference.Path(d.ref.ref), http.StatusText(res.StatusCode))
|
||||
return false, -1, errors.Errorf("failed to read from destination repository %s: %d (%s)", reference.Path(d.ref.ref), res.StatusCode, http.StatusText(res.StatusCode))
|
||||
}
|
||||
}
|
||||
|
||||
func (d *dockerImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
|
||||
return info, nil
|
||||
// mountBlob tries to mount blob srcDigest from srcRepo to the current destination.
|
||||
func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest) error {
|
||||
u := url.URL{
|
||||
Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
|
||||
RawQuery: url.Values{
|
||||
"mount": {srcDigest.String()},
|
||||
"from": {reference.Path(srcRepo)},
|
||||
}.Encode(),
|
||||
}
|
||||
mountPath := u.String()
|
||||
logrus.Debugf("Trying to mount %s", mountPath)
|
||||
res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
switch res.StatusCode {
|
||||
case http.StatusCreated:
|
||||
logrus.Debugf("... mount OK")
|
||||
return nil
|
||||
case http.StatusAccepted:
|
||||
// Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process.
|
||||
// Abort, and let the ultimate caller do an upload when it's ready, instead.
|
||||
// NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
|
||||
uploadLocation, err := res.Location()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Error determining upload URL after a mount attempt")
|
||||
}
|
||||
logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
|
||||
res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth)
|
||||
if err != nil {
|
||||
logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
|
||||
} else {
|
||||
defer res2.Body.Close()
|
||||
if res2.StatusCode != http.StatusNoContent {
|
||||
logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res.StatusCode))
|
||||
}
|
||||
}
|
||||
// Anyway, if canceling the upload fails, ignore it and return the more important error:
|
||||
return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
|
||||
default:
|
||||
logrus.Debugf("Error mounting, response %#v", *res)
|
||||
return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
|
||||
}
|
||||
}
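// Illustrative sketch (not from the commits above): the registry API request mountBlob issues.
// A cross-repository mount is a POST to the target repository's blob-upload endpoint with
// "mount" and "from" query parameters; 201 means the registry linked the existing blob, while
// 202 means it fell back to a plain upload session (which the code above then tries to cancel).
// The host and repository names below are illustrative.
package mountsketch

import (
	"fmt"
	"net/url"
)

func mountURL(registry, targetRepo, sourceRepo, dgst string) string {
	u := url.URL{
		Scheme: "https",
		Host:   registry,
		Path:   fmt.Sprintf("/v2/%s/blobs/uploads/", targetRepo),
		RawQuery: url.Values{
			"mount": {dgst},
			"from":  {sourceRepo},
		}.Encode(),
	}
	// e.g. https://registry.example.com/v2/project/app/blobs/uploads/?from=project%2Fbase&mount=sha256%3A...
	return u.String()
}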
|
||||
|
||||
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
|
||||
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
|
||||
// info.Digest must not be empty.
|
||||
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
|
||||
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
|
||||
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
|
||||
// May use and/or update cache.
|
||||
func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
|
||||
if info.Digest == "" {
|
||||
return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
|
||||
}
|
||||
|
||||
// First, check whether the blob happens to already exist at the destination.
|
||||
exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest)
|
||||
if err != nil {
|
||||
return false, types.BlobInfo{}, err
|
||||
}
|
||||
if exists {
|
||||
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
|
||||
return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
|
||||
}
|
||||
|
||||
// Then try reusing blobs from other locations.
|
||||
|
||||
// Checking candidateRepo, and mounting from it, requires an expanded token scope.
|
||||
// We still want to reuse the ping information and other aspects of the client, so rather than make a fresh copy, we use this somewhat ugly extraScope hack.
|
||||
if d.c.extraScope != nil {
|
||||
return false, types.BlobInfo{}, errors.New("Internal error: dockerClient.extraScope was set before TryReusingBlob")
|
||||
}
|
||||
defer func() {
|
||||
d.c.extraScope = nil
|
||||
}()
|
||||
for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
|
||||
candidateRepo, err := parseBICLocationReference(candidate.Location)
|
||||
if err != nil {
|
||||
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
|
||||
continue
|
||||
}
|
||||
logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name())
|
||||
|
||||
// Sanity checks:
|
||||
if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
|
||||
logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
|
||||
continue
|
||||
}
|
||||
if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
|
||||
logrus.Debug("... Already tried the primary destination")
|
||||
continue
|
||||
}
|
||||
|
||||
// Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
|
||||
d.c.extraScope = &authScope{
|
||||
remoteName: reference.Path(candidateRepo),
|
||||
actions: "pull",
|
||||
}
|
||||
// This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
|
||||
// But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
|
||||
// So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure.
|
||||
// On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly.
|
||||
// Even worse, docker/distribution does not actually reasonably implement canceling uploads
|
||||
// (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
|
||||
// so, be a nice client and don't create unnecessary upload sessions on the server.
|
||||
exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest)
|
||||
if err != nil {
|
||||
logrus.Debugf("... Failed: %v", err)
|
||||
continue
|
||||
}
|
||||
if !exists {
|
||||
// FIXME? Should we drop the blob from cache here (and elsewhere?)?
|
||||
continue // logrus.Debug() already happened in blobExists
|
||||
}
|
||||
if candidateRepo.Name() != d.ref.ref.Name() {
|
||||
if err := d.mountBlob(ctx, candidateRepo, candidate.Digest); err != nil {
|
||||
logrus.Debugf("... Mount failed: %v", err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
|
||||
return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil
|
||||
}
|
||||
|
||||
return false, types.BlobInfo{}, nil
|
||||
}
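// Illustrative sketch (not from the commits above): the decision sequence TryReusingBlob
// implements, stripped of error handling and the extraScope token handling. All names here are
// hypothetical stand-ins; the real method works on dockerImageDestination and the BlobInfoCache.
package reusesketch

type candidate struct{ repo, digest string }

func tryReuse(primaryRepo, wanted string, candidates []candidate,
	exists func(repo, digest string) bool, mount func(srcRepo, digest string) bool) (string, bool) {
	// 1. Cheapest case: the blob already exists in the destination repository.
	if exists(primaryRepo, wanted) {
		return wanted, true
	}
	// 2. Otherwise walk cached candidates (possibly substitutable compressed variants)...
	for _, c := range candidates {
		if !exists(c.repo, c.digest) {
			continue
		}
		// 3. ...and cross-repo mount them into the destination instead of re-uploading.
		if c.repo == primaryRepo || mount(c.repo, c.digest) {
			return c.digest, true
		}
	}
	return "", false
}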
|
||||
|
||||
// PutManifest writes manifest to the destination.
|
||||
@@ -237,7 +369,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte) erro
|
||||
if mimeType != "" {
|
||||
headers["Content-Type"] = []string{mimeType}
|
||||
}
|
||||
res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m))
|
||||
res, err := d.c.makeRequest(ctx, "PUT", path, headers, bytes.NewReader(m), v2Auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -442,7 +574,7 @@ sigExists:
|
||||
}
|
||||
|
||||
path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
|
||||
res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body))
|
||||
res, err := d.c.makeRequest(ctx, "PUT", path, nil, bytes.NewReader(body), v2Auth)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vendor/github.com/containers/image/docker/docker_image_src.go (generated, vendored): 35 lines changed
@@ -89,7 +89,7 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin
|
||||
path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest)
|
||||
headers := make(map[string][]string)
|
||||
headers["Accept"] = manifest.DefaultRequestedManifestMIMETypes
|
||||
res, err := s.c.makeRequest(ctx, "GET", path, headers, nil)
|
||||
res, err := s.c.makeRequest(ctx, "GET", path, headers, nil, v2Auth)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
@@ -137,10 +137,10 @@ func (s *dockerImageSource) getExternalBlob(ctx context.Context, urls []string)
|
||||
err error
|
||||
)
|
||||
for _, url := range urls {
|
||||
resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, false)
|
||||
resp, err = s.c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, noAuth)
|
||||
if err == nil {
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
err = errors.Errorf("error fetching external blob from %q: %d", url, resp.StatusCode)
|
||||
err = errors.Errorf("error fetching external blob from %q: %d (%s)", url, resp.StatusCode, http.StatusText(resp.StatusCode))
|
||||
logrus.Debug(err)
|
||||
continue
|
||||
}
|
||||
@@ -161,22 +161,30 @@ func getBlobSize(resp *http.Response) int64 {
|
||||
return size
|
||||
}
|
||||
|
||||
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
|
||||
func (s *dockerImageSource) HasThreadSafeGetBlob() bool {
|
||||
return true
|
||||
}
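// Illustrative sketch (not from the commits above): what HasThreadSafeGetBlob and
// HasThreadSafePutBlob are for. A copier can fan layer copies out to goroutines only when both
// ends declare their blob methods safe for concurrent use; otherwise it falls back to one layer
// at a time. The copyOne callback is a hypothetical stand-in for the real per-layer copy.
package parallelsketch

import (
	"context"
	"sync"

	"github.com/containers/image/types"
)

func copyLayers(ctx context.Context, src types.ImageSource, dest types.ImageDestination,
	layers []types.BlobInfo, copyOne func(context.Context, types.BlobInfo) error) error {
	parallel := src.HasThreadSafeGetBlob() && dest.HasThreadSafePutBlob()

	errs := make(chan error, len(layers))
	var wg sync.WaitGroup
	for _, l := range layers {
		if !parallel {
			errs <- copyOne(ctx, l) // sequential fallback
			continue
		}
		wg.Add(1)
		go func(l types.BlobInfo) {
			defer wg.Done()
			errs <- copyOne(ctx, l)
		}(l)
	}
	wg.Wait()
	close(errs)
	for err := range errs {
		if err != nil {
			return err
		}
	}
	return nil
}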
|
||||
|
||||
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
|
||||
func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
|
||||
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
|
||||
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
|
||||
func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
|
||||
if len(info.URLs) != 0 {
|
||||
return s.getExternalBlob(ctx, info.URLs)
|
||||
}
|
||||
|
||||
path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String())
|
||||
logrus.Debugf("Downloading %s", path)
|
||||
res, err := s.c.makeRequest(ctx, "GET", path, nil, nil)
|
||||
res, err := s.c.makeRequest(ctx, "GET", path, nil, nil, v2Auth)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
if res.StatusCode != http.StatusOK {
|
||||
// print url also
|
||||
return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d", res.StatusCode)
|
||||
return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
|
||||
}
|
||||
cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref))
|
||||
return res.Body, getBlobSize(res), nil
|
||||
}
|
||||
|
||||
@@ -274,7 +282,7 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (
if res.StatusCode == http.StatusNotFound {
return nil, true, nil
} else if res.StatusCode != http.StatusOK {
return nil, false, errors.Errorf("Error reading signature from %s: status %d", url.String(), res.StatusCode)
return nil, false, errors.Errorf("Error reading signature from %s: status %d (%s)", url.String(), res.StatusCode, http.StatusText(res.StatusCode))
}
sig, err := ioutil.ReadAll(res.Body)
if err != nil {
@@ -310,7 +318,14 @@ func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, i

// deleteImage deletes the named image from the registry, if supported.
func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error {
c, err := newDockerClientFromRef(sys, ref, true, "push")
// docker/distribution does not document what action should be used for deleting images.
//
// Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it.
// quay.io requires "push" (an explicit "pull" is unnecessary), does not grant any token (fails parsing the request) if "delete" is included.
// OpenShift ignores the action string (both the password and the token is an OpenShift API token identifying a user).
//
// We have to hard-code a single string, luckily both docker/distribution and quay.io support "*" to mean "everything".
c, err := newDockerClientFromRef(sys, ref, true, "*")
if err != nil {
return err
}
@@ -325,7 +340,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
return err
}
getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
get, err := c.makeRequest(ctx, "GET", getPath, headers, nil)
get, err := c.makeRequest(ctx, "GET", getPath, headers, nil, v2Auth)
if err != nil {
return err
}
@@ -347,7 +362,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere

// When retrieving the digest from a registry >= 2.3 use the following header:
// "Accept": "application/vnd.docker.distribution.manifest.v2+json"
delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil)
delete, err := c.makeRequest(ctx, "DELETE", deletePath, headers, nil, v2Auth)
if err != nil {
return err
}

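The comment block above explains why the delete path now requests the catch-all `"*"` action when asking for a registry bearer token. From a caller's point of view nothing changes; deletion still goes through the transport-level `DeleteImage` method. A hedged sketch, assuming the same imports as the previous example:

```go
// deleteTag removes a tagged image via the docker transport; internally this
// ends up in deleteImage above, which now requests the "*" scope so that
// docker/distribution, quay.io and OpenShift all accept the token request.
// Assumes: "github.com/containers/image/docker" and ".../types" are imported.
func deleteTag(ctx context.Context, name string) error {
	ref, err := docker.ParseReference("//" + name) // e.g. "registry.example.com/myns/myimage:old"
	if err != nil {
		return err
	}
	return ref.DeleteImage(ctx, &types.SystemContext{})
}
```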
42 vendor/github.com/containers/image/docker/tarfile/dest.go (generated, vendored)
@@ -82,13 +82,19 @@ func (d *Destination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, we only accept schema2 images where EmbeddedDockerReferenceConflicts() is always false.
}

// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
func (d *Destination) HasThreadSafePutBlob() bool {
return false
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
// Ouch, we need to stream the blob into a temporary file just to determine the size.
// When the layer is decompressed, we also have to generate the digest on uncompressed data.
if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
@@ -120,12 +126,12 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
}

// Maybe the blob has been already sent
ok, size, err := d.HasBlob(ctx, inputInfo)
ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false)
if err != nil {
return types.BlobInfo{}, err
}
if ok {
return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
return reusedInfo, nil
}

if isConfig {
@@ -151,29 +157,21 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}

// HasBlob returns true iff the image destination already contains a blob with
// the matching digest which can be reapplied using ReapplyBlob. Unlike
// PutBlob, the digest can not be empty. If HasBlob returns true, the size of
// the blob must also be returned. If the destination does not contain the
// blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); it
// returns a non-nil error only on an unexpected failure.
func (d *Destination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
}
if blob, ok := d.blobs[info.Digest]; ok {
return true, blob.Size, nil
return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
}
return false, -1, nil
}

// ReapplyBlob informs the image destination that a blob for which HasBlob
// previously returned true would have been passed to PutBlob if it had
// returned false. Like HasBlob and unlike PutBlob, the digest can not be
// empty. If the blob is a filesystem layer, this signifies that the changes
// it describes need to be applied again when composing a filesystem tree.
func (d *Destination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
return info, nil
return false, types.BlobInfo{}, nil
}

func (d *Destination) createRepositoriesFile(rootLayerID string) error {

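The replacement of `HasBlob`/`ReapplyBlob` with `TryReusingBlob` changes how callers avoid re-uploading blobs: reuse is attempted explicitly, and only when that fails is the data streamed through `PutBlob`. A minimal sketch of that calling pattern against the generic `types.ImageDestination` interface (the helper name is invented for illustration; signatures match the doc comments in the hunk above):

```go
// putLayer first asks the destination to reuse an already-present blob by
// digest (allowing substitution of an equivalent blob), and only streams the
// data via PutBlob when reuse is not possible.
// Assumes: "context", "io" and "github.com/containers/image/types" are imported.
func putLayer(ctx context.Context, dest types.ImageDestination, stream io.Reader,
	info types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
	reused, reusedInfo, err := dest.TryReusingBlob(ctx, info, cache, true)
	if err != nil {
		return types.BlobInfo{}, err
	}
	if reused {
		return reusedInfo, nil
	}
	return dest.PutBlob(ctx, stream, info, cache, false)
}
```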
9 vendor/github.com/containers/image/docker/tarfile/src.go (generated, vendored)
@@ -397,8 +397,15 @@ func (r uncompressedReadCloser) Close() error {
return res
}

// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
func (s *Source) HasThreadSafeGetBlob() bool {
return false
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if err := s.ensureCachedDataIsPresent(); err != nil {
return nil, 0, err
}

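The new `HasThreadSafeGetBlob`/`HasThreadSafePutBlob` hints let a copy pipeline decide whether layers may be transferred concurrently: the docker source above answers true, while the tarfile source and destination answer false. A simplified sketch of how such a gate might look (the worker count is an arbitrary illustration, not the value used by containers/image/copy):

```go
// maxWorkers returns how many layer copies may run in parallel for a given
// source/destination pair, based on the thread-safety hints added here.
// Assumes: "github.com/containers/image/types" is imported.
func maxWorkers(src types.ImageSource, dest types.ImageDestination) int {
	if src.HasThreadSafeGetBlob() && dest.HasThreadSafePutBlob() {
		return 6 // both ends are safe for concurrent blob transfers
	}
	return 1 // at least one end requires sequential copying
}
```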
66 vendor/github.com/containers/image/docs/atomic-signature-embedded-json.json (generated, vendored)
@@ -1,66 +0,0 @@
|
||||
{
|
||||
"title": "JSON embedded in an atomic container signature",
|
||||
"description": "This schema is a supplement to atomic-signature.md in this directory.\n\nConsumers of the JSON MUST use the processing rules documented in atomic-signature.md, especially the requirements for the 'critical' subjobject.\n\nWhenever this schema and atomic-signature.md, or the github.com/containers/image/signature implementation, differ,\nit is the atomic-signature.md document, or the github.com/containers/image/signature implementation, which governs.\n\nUsers are STRONGLY RECOMMENDED to use the github.com/containeres/image/signature implementation instead of writing\ntheir own, ESPECIALLY when consuming signatures, so that the policy.json format can be shared by all image consumers.\n",
|
||||
"type": "object",
|
||||
"required": [
|
||||
"critical",
|
||||
"optional"
|
||||
],
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"critical": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"type",
|
||||
"image",
|
||||
"identity"
|
||||
],
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"type": {
|
||||
"type": "string",
|
||||
"enum": [
|
||||
"atomic container signature"
|
||||
]
|
||||
},
|
||||
"image": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"docker-manifest-digest"
|
||||
],
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"docker-manifest-digest": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
},
|
||||
"identity": {
|
||||
"type": "object",
|
||||
"required": [
|
||||
"docker-reference"
|
||||
],
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"docker-reference": {
|
||||
"type": "string"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"optional": {
|
||||
"type": "object",
|
||||
"description": "All members are optional, but if they are included, they must be valid.",
|
||||
"additionalProperties": true,
|
||||
"properties": {
|
||||
"creator": {
|
||||
"type": "string"
|
||||
},
|
||||
"timestamp": {
|
||||
"type": "integer"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
241 vendor/github.com/containers/image/docs/atomic-signature.md (generated, vendored)
@@ -1,241 +0,0 @@
|
||||
% atomic-signature(5) Atomic signature format
|
||||
% Miloslav Trmač
|
||||
% March 2017
|
||||
|
||||
# Atomic signature format
|
||||
|
||||
This document describes the format of “atomic” container signatures,
|
||||
as implemented by the `github.com/containers/image/signature` package.
|
||||
|
||||
Most users should be able to consume these signatures by using the `github.com/containers/image/signature` package
|
||||
(preferably through the higher-level `signature.PolicyContext` interface)
|
||||
without having to care about the details of the format described below.
|
||||
This documentation exists primarily for maintainers of the package
|
||||
and to allow independent reimplementations.
|
||||
|
||||
## High-level overview
|
||||
|
||||
The signature provides an end-to-end authenticated claim that a container image
|
||||
has been approved by a specific party (e.g. the creator of the image as their work,
|
||||
an automated build system as a result of an automated build,
|
||||
a company IT department approving the image for production) under a specified _identity_
|
||||
(e.g. an OS base image / specific application, with a specific version).
|
||||
|
||||
An atomic container signature consists of a cryptographic signature which identifies
|
||||
and authenticates who signed the image, and carries as a signed payload a JSON document.
|
||||
The JSON document identifies the image being signed, claims a specific identity of the
|
||||
image and if applicable, contains other information about the image.
|
||||
|
||||
The signatures do not modify the container image (the layers, configuration, manifest, …);
|
||||
e.g. their presence does not change the manifest digest used to identify the image in
|
||||
docker/distribution servers; rather, the signatures are associated with an immutable image.
|
||||
An image can have any number of signatures so signature distribution systems SHOULD support
|
||||
associating more than one signature with an image.
|
||||
|
||||
## The cryptographic signature
|
||||
|
||||
As distributed, the atomic container signature is a blob which contains a cryptographic signature
|
||||
in an industry-standard format, carrying a signed JSON payload (i.e. the blob contains both the
|
||||
JSON document and a signature of the JSON document; it is not a “detached signature” with
|
||||
independent blobs containing the JSON document and a cryptographic signature).
|
||||
|
||||
Currently the only defined cryptographic signature format is an OpenPGP signature (RFC 4880),
|
||||
but others may be added in the future. (The blob does not contain metadata identifying the
|
||||
cryptographic signature format. It is expected that most formats are sufficiently self-describing
|
||||
that this is not necessary and the configured expected public key provides another indication
|
||||
of the expected cryptographic signature format. Such metadata may be added in the future for
|
||||
newly added cryptographic signature formats, if necessary.)
|
||||
|
||||
Consumers of atomic container signatures SHOULD verify the cryptographic signature
|
||||
against one or more trusted public keys
|
||||
(e.g. defined in a [policy.json signature verification policy file](policy.json.md))
|
||||
before parsing or processing the JSON payload in _any_ way,
|
||||
in particular they SHOULD stop processing the container signature
|
||||
if the cryptographic signature verification fails, without even starting to process the JSON payload.
|
||||
|
||||
(Consumers MAY extract identification of the signing key and other metadata from the cryptographic signature,
|
||||
and the JSON payload, without verifying the signature, if the purpose is to allow managing the signature blobs,
|
||||
e.g. to list the authors and image identities of signatures associated with a single container image;
|
||||
if so, they SHOULD design the output of such processing to minimize the risk of users considering the output trusted
|
||||
or in any way usable for making policy decisions about the image.)
|
||||
|
||||
### OpenPGP signature verification
|
||||
|
||||
When verifying a cryptographic signature in the OpenPGP format,
|
||||
the consumer MUST verify at least the following aspects of the signature
|
||||
(like the `github.com/containers/image/signature` package does):
|
||||
|
||||
- The blob MUST be a “Signed Message” as defined RFC 4880 section 11.3.
|
||||
(e.g. it MUST NOT be an unsigned “Literal Message”, or any other non-signature format).
|
||||
- The signature MUST have been made by an expected key trusted for the purpose (and the specific container image).
|
||||
- The signature MUST be correctly formed and pass the cryptographic validation.
|
||||
- The signature MUST correctly authenticate the included JSON payload
|
||||
(in particular, the parsing of the JSON payload MUST NOT start before the complete payload has been cryptographically authenticated).
|
||||
- The signature MUST NOT be expired.
|
||||
|
||||
The consumer SHOULD have tests for its verification code which verify that signatures failing any of the above are rejected.
|
||||
|
||||
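For consumers who call the Go package rather than re-implementing the checks above, a hedged sketch of verification follows, assuming the `NewGPGSigningMechanism` and `VerifyDockerManifestSignature` helpers exposed by `github.com/containers/image/signature`; the expected reference and key fingerprint are placeholders:

```go
// verifySignature checks one signature blob against a manifest, requiring a
// specific signing key and a specific claimed docker reference.
// Assumes: "github.com/containers/image/signature" is imported.
func verifySignature(sigBlob, manifest []byte) error {
	mech, err := signature.NewGPGSigningMechanism()
	if err != nil {
		return err
	}
	defer mech.Close()

	_, err = signature.VerifyDockerManifestSignature(
		sigBlob, manifest,
		"docker.io/library/busybox:latest", // expected critical.identity.docker-reference
		mech,
		"0123456789ABCDEF0123456789ABCDEF01234567", // expected GPG key fingerprint (placeholder)
	)
	return err
}
```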
## JSON processing and forward compatibility
|
||||
|
||||
The payload of the cryptographic signature is a JSON document (RFC 7159).
|
||||
Consumers SHOULD parse it very strictly,
|
||||
refusing any signature which violates the expected format (e.g. missing members, incorrect member types)
|
||||
or can be interpreted ambiguously (e.g. a duplicated member in a JSON object).
|
||||
|
||||
Any violations of the JSON format or of other requirements in this document MAY be accepted if the JSON document can be recognized
|
||||
to have been created by a known-incorrect implementation (see [`optional.creator`](#optionalcreator) below)
|
||||
and if the semantics of the invalid document, as created by such an implementation, is clear.
|
||||
|
||||
The top-level value of the JSON document MUST be a JSON object with exactly two members, `critical` and `optional`,
|
||||
each a JSON object.
|
||||
|
||||
The `critical` object MUST contain a `type` member identifying the document as an atomic container signature
|
||||
(as defined [below](#criticaltype))
|
||||
and signature consumers MUST reject signatures which do not have this member or in which this member does not have the expected value.
|
||||
|
||||
To ensure forward compatibility (allowing older signature consumers to correctly
|
||||
accept or reject signatures created at a later date, with possible extensions to this format),
|
||||
consumers MUST reject the signature if the `critical` object, or _any_ of its subobjects,
|
||||
contain _any_ member or data value which is unrecognized, unsupported, invalid, or in any other way unexpected.
|
||||
At a minimum, this includes unrecognized members in a JSON object, or incorrect types of expected members.
|
||||
|
||||
For the same reason, consumers SHOULD accept any members with unrecognized names in the `optional` object,
|
||||
and MAY accept signatures where the object member is recognized but unsupported, or the value of the member is unsupported.
|
||||
Consumers still SHOULD reject signatures where a member of an `optional` object is supported but the value is recognized as invalid.
|
||||
|
||||
## JSON data format
|
||||
|
||||
An example of the full format follows, with detailed description below.
|
||||
To reiterate, consumers of the signature SHOULD perform successful cryptographic verification,
|
||||
and MUST reject unexpected data in the `critical` object, or in the top-level object, as described above.
|
||||
|
||||
```json
|
||||
{
|
||||
"critical": {
|
||||
"type": "atomic container signature",
|
||||
"image": {
|
||||
"docker-manifest-digest": "sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e"
|
||||
},
|
||||
"identity": {
|
||||
"docker-reference": "docker.io/library/busybox:latest"
|
||||
}
|
||||
},
|
||||
"optional": {
|
||||
"creator": "some software package v1.0.1-35",
|
||||
"timestamp": 1483228800,
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### `critical`
|
||||
|
||||
This MUST be a JSON object which contains data critical to correctly evaluating the validity of a signature.
|
||||
|
||||
Consumers MUST reject any signature where the `critical` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data.
|
||||
|
||||
### `critical.type`
|
||||
|
||||
This MUST be a string with a string value exactly equal to `atomic container signature` (three words, including the spaces).
|
||||
|
||||
Signature consumers MUST reject signatures which do not have this member or this member does not have exactly the expected value.
|
||||
|
||||
(The consumers MAY support signatures with a different value of the `type` member, if any is defined in the future;
|
||||
if so, the rest of the JSON document is interpreted according to rules defining that value of `critical.type`,
|
||||
not by this document.)
|
||||
|
||||
### `critical.image`
|
||||
|
||||
This MUST be a JSON object which identifies the container image this signature applies to.
|
||||
|
||||
Consumers MUST reject any signature where the `critical.image` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data.
|
||||
|
||||
(Currently only the `docker-manifest-digest` way of identifying a container image is defined;
|
||||
alternatives to this may be defined in the future,
|
||||
but existing consumers are required to reject signatures which use formats they do not support.)
|
||||
|
||||
### `critical.image.docker-manifest-digest`
|
||||
|
||||
This MUST be a JSON string, in the `github.com/opencontainers/go-digest.Digest` string format.
|
||||
|
||||
The value of this member MUST match the manifest of the signed container image, as implemented in the docker/distribution manifest addressing system.
|
||||
|
||||
The consumer of the signature SHOULD verify the manifest digest against a fully verified signature before processing the contents of the image manifest in any other way
|
||||
(e.g. parsing the manifest further or downloading layers of the image).
|
||||
|
||||
Implementation notes:
|
||||
* A single container image manifest may have several valid manifest digest values, using different algorithms.
|
||||
* For “signed” [docker/distribution schema 1](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md) manifests,
|
||||
the manifest digest applies to the payload of the JSON web signature, not to the raw manifest blob.
|
||||
|
||||
### `critical.identity`
|
||||
|
||||
This MUST be a JSON object which identifies the claimed identity of the image (usually the purpose of the image, or the application, along with a version information),
|
||||
as asserted by the author of the signature.
|
||||
|
||||
Consumers MUST reject any signature where the `critical.identity` object contains any unrecognized, unsupported, invalid or in any other way unexpected member or data.
|
||||
|
||||
(Currently only the `docker-reference` way of claiming an image identity/purpose is defined;
|
||||
alternatives to this may be defined in the future,
|
||||
but existing consumers are required to reject signatures which use formats they do not support.)
|
||||
|
||||
### `critical.identity.docker-reference`
|
||||
|
||||
This MUST be a JSON string, in the `github.com/docker/distribution/reference` string format,
|
||||
and using the same normalization semantics (where e.g. `busybox:latest` is equivalent to `docker.io/library/busybox:latest`).
|
||||
If the normalization semantics allows multiple string representations of the claimed identity with equivalent meaning,
|
||||
the `critical.identity.docker-reference` member SHOULD use the fully explicit form (including the full host name and namespaces).
|
||||
|
||||
The value of this member MUST match the image identity/purpose expected by the consumer of the image signature and the image
|
||||
(again, accounting for the `docker/distribution/reference` normalization semantics).
|
||||
|
||||
In the most common case, this means that the `critical.identity.docker-reference` value must be equal to the docker/distribution reference used to refer to or download the image.
|
||||
However, depending on the specific application, users or system administrators may accept less specific matches
|
||||
(e.g. ignoring the tag value in the signature when pulling the `:latest` tag or when referencing an image by digest),
|
||||
or they may require `critical.identity.docker-reference` values with a completely different namespace to the reference used to refer to/download the image
|
||||
(e.g. requiring a `critical.identity.docker-reference` value which identifies the image as coming from a supplier when fetching it from a company-internal mirror of approved images).
|
||||
The software performing this verification SHOULD allow the users to define such a policy using the [policy.json signature verification policy file format](policy.json.md).
|
||||
|
||||
The `critical.identity.docker-reference` value SHOULD contain either a tag or digest;
|
||||
in most cases, it SHOULD use a tag rather than a digest. (See also the default [`matchRepoDigestOrExact` matching semantics in `policy.json`](policy.json.md#signedby).)
|
||||
|
||||
### `optional`
|
||||
|
||||
This MUST be a JSON object.
|
||||
|
||||
Consumers SHOULD accept any members with unrecognized names in the `optional` object,
|
||||
and MAY accept a signature where the object member is recognized but unsupported, or the value of the member is valid but unsupported.
|
||||
Consumers still SHOULD reject any signature where a member of an `optional` object is supported but the value is recognized as invalid.
|
||||
|
||||
### `optional.creator`
|
||||
|
||||
If present, this MUST be a JSON string, identifying the name and version of the software which has created the signature.
|
||||
|
||||
The contents of this string is not defined in detail; however each implementation creating atomic container signatures:
|
||||
|
||||
- SHOULD define the contents to unambiguously define the software in practice (e.g. it SHOULD contain the name of the software, not only the version number)
|
||||
- SHOULD use a build and versioning process which ensures that the contents of this string (e.g. an included version number)
|
||||
changes whenever the format or semantics of the generated signature changes in any way;
|
||||
it SHOULD not be possible for two implementations which use a different format or semantics to have the same `optional.creator` value
|
||||
- SHOULD use a format which is reasonably easy to parse in software (perhaps using a regexp),
|
||||
and which makes it easy enough to recognize a range of versions of a specific implementation
|
||||
(e.g. the version of the implementation SHOULD NOT be only a git hash, because they don’t have an easily defined ordering;
|
||||
the string should contain a version number, or at least a date of the commit).
|
||||
|
||||
Consumers of atomic container signatures MAY recognize specific values or sets of values of `optional.creator`
|
||||
(perhaps augmented with `optional.timestamp`),
|
||||
and MAY change their processing of the signature based on these values
|
||||
(usually to accommodate violations of this specification in past versions of the signing software which cannot be fixed retroactively),
|
||||
as long as the semantics of the invalid document, as created by such an implementation, is clear.
|
||||
|
||||
If consumers of signatures do change their behavior based on the `optional.creator` value,
|
||||
they SHOULD take care that the way they process the signatures is not inconsistent with
|
||||
strictly validating signature consumers.
|
||||
(I.e. it is acceptable for a consumer to accept a signature based on a specific `optional.creator` value
|
||||
if other implementations would completely reject the signature,
|
||||
but it would be very undesirable for the two kinds of implementations to accept the signature in different
|
||||
and inconsistent situations.)
|
||||
|
||||
### `optional.timestamp`
|
||||
|
||||
If present, this MUST be a JSON number, which is representable as a 64-bit integer, and identifies the time when the signature was created
|
||||
as the number of seconds since the UNIX epoch (Jan 1 1970 00:00 UTC).
|
||||
281 vendor/github.com/containers/image/docs/policy.json.md (generated, vendored)
@@ -1,281 +0,0 @@
|
||||
% POLICY.JSON(5) policy.json Man Page
|
||||
% Miloslav Trmač
|
||||
% September 2016
|
||||
|
||||
# NAME
|
||||
policy.json - syntax for the signature verification policy file
|
||||
|
||||
## DESCRIPTION
|
||||
|
||||
Signature verification policy files are used to specify policy, e.g. trusted keys,
|
||||
applicable when deciding whether to accept an image, or individual signatures of that image, as valid.
|
||||
|
||||
The default policy is stored (unless overridden at compile-time) at `/etc/containers/policy.json`;
|
||||
applications performing verification may allow using a different policy instead.
|
||||
|
||||
## FORMAT
|
||||
|
||||
The signature verification policy file, usually called `policy.json`,
|
||||
uses a JSON format. Unlike some other JSON files, its parsing is fairly strict:
|
||||
unrecognized, duplicated or otherwise invalid fields cause the entire file,
|
||||
and usually the entire operation, to be rejected.
|
||||
|
||||
The purpose of the policy file is to define a set of *policy requirements* for a container image,
|
||||
usually depending on its location (where it is being pulled from) or otherwise defined identity.
|
||||
|
||||
Policy requirements can be defined for:
|
||||
|
||||
- An individual *scope* in a *transport*.
|
||||
The *transport* values are the same as the transport prefixes when pushing/pulling images (e.g. `docker:`, `atomic:`),
|
||||
and *scope* values are defined by each transport; see below for more details.
|
||||
|
||||
Usually, a scope can be defined to match a single image, and various prefixes of
|
||||
such a most specific scope define namespaces of matching images.
|
||||
- A default policy for a single transport, expressed using an empty string as a scope
|
||||
- A global default policy.
|
||||
|
||||
If multiple policy requirements match a given image, only the requirements from the most specific match apply;
the more general policy requirement definitions are ignored.
|
||||
|
||||
This is expressed in JSON using the top-level syntax
|
||||
```js
|
||||
{
|
||||
"default": [/* policy requirements: global default */]
|
||||
"transports": {
|
||||
transport_name: {
|
||||
"": [/* policy requirements: default for transport $transport_name */],
|
||||
scope_1: [/* policy requirements: default for $scope_1 in $transport_name */],
|
||||
scope_2: [/*…*/]
|
||||
/*…*/
|
||||
},
|
||||
transport_name_2: {/*…*/}
|
||||
/*…*/
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The global `default` set of policy requirements is mandatory; all of the other fields
|
||||
(`transports` itself, any specific transport, the transport-specific default, etc.) are optional.
|
||||
|
||||
<!-- NOTE: Keep this in sync with transports/transports.go! -->
|
||||
## Supported transports and their scopes
|
||||
|
||||
### `atomic:`
|
||||
|
||||
The `atomic:` transport refers to images in an Atomic Registry.
|
||||
|
||||
Supported scopes use the form _hostname_[`:`_port_][`/`_namespace_[`/`_imagestream_ [`:`_tag_]]],
|
||||
i.e. either specifying a complete name of a tagged image, or prefix denoting
|
||||
a host/namespace/image stream.
|
||||
|
||||
*Note:* The _hostname_ and _port_ refer to the Docker registry host and port (the one used
|
||||
e.g. for `docker pull`), _not_ to the OpenShift API host and port.
|
||||
|
||||
### `dir:`
|
||||
|
||||
The `dir:` transport refers to images stored in local directories.
|
||||
|
||||
Supported scopes are paths of directories (either containing a single image or
|
||||
subdirectories possibly containing images).
|
||||
|
||||
*Note:* The paths must be absolute and contain no symlinks. Paths violating these requirements may be silently ignored.
|
||||
|
||||
The top-level scope `"/"` is forbidden; use the transport default scope `""`,
|
||||
for consistency with other transports.
|
||||
|
||||
### `docker:`
|
||||
|
||||
The `docker:` transport refers to images in a registry implementing the "Docker Registry HTTP API V2".
|
||||
|
||||
Scopes matching individual images are named Docker references *in the fully expanded form*, either
|
||||
using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`).
|
||||
|
||||
More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest),
|
||||
a repository namespace, or a registry host (by only specifying the host name).
|
||||
|
||||
### `oci:`
|
||||
|
||||
The `oci:` transport refers to images in directories compliant with "Open Container Image Layout Specification".
|
||||
|
||||
Supported scopes use the form _directory_`:`_tag_, with _directory_ referring to
|
||||
a directory containing one or more tags, or any of the parent directories.
|
||||
|
||||
*Note:* See `dir:` above for semantics and restrictions on the directory paths, they apply to `oci:` equivalently.
|
||||
|
||||
### `tarball:`
|
||||
|
||||
The `tarball:` transport refers to tarred up container root filesystems.
|
||||
|
||||
Scopes are ignored.
|
||||
|
||||
## Policy Requirements
|
||||
|
||||
Using the mechanisms above, a set of policy requirements is looked up. The policy requirements
|
||||
are represented as a JSON array of individual requirement objects. For an image to be accepted,
|
||||
*all* of the requirements must be satisfied simultaneously.
|
||||
|
||||
The policy requirements can also be used to decide whether an individual signature is accepted (= is signed by a recognized key of a known author);
|
||||
in that case some requirements may apply only to some signatures, but each signature must be accepted by *at least one* requirement object.
|
||||
|
||||
The following requirement objects are supported:
|
||||
|
||||
### `insecureAcceptAnything`
|
||||
|
||||
A simple requirement with the following syntax
|
||||
|
||||
```json
|
||||
{"type":"insecureAcceptAnything"}
|
||||
```
|
||||
|
||||
This requirement accepts any image (but note that other requirements in the array still apply).
|
||||
|
||||
When deciding to accept an individual signature, this requirement does not have any effect; it does *not* cause the signature to be accepted, though.
|
||||
|
||||
This is useful primarily for policy scopes where no signature verification is required;
|
||||
because the array of policy requirements must not be empty, this requirement is used
|
||||
to represent the lack of requirements explicitly.
|
||||
|
||||
### `reject`
|
||||
|
||||
A simple requirement with the following syntax:
|
||||
|
||||
```json
|
||||
{"type":"reject"}
|
||||
```
|
||||
|
||||
This requirement rejects every image, and every signature.
|
||||
|
||||
### `signedBy`
|
||||
|
||||
This requirement requires an image to be signed with an expected identity, or accepts a signature if it is using an expected identity and key.
|
||||
|
||||
```js
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys", /* The only currently supported value */
|
||||
"keyPath": "/path/to/local/keyring/file",
|
||||
"keyData": "base64-encoded-keyring-data",
|
||||
"signedIdentity": identity_requirement
|
||||
}
|
||||
```
|
||||
<!-- Later: other keyType values -->
|
||||
|
||||
Exactly one of `keyPath` and `keyData` must be present, containing a GPG keyring of one or more public keys. Only signatures made by these keys are accepted.
|
||||
|
||||
The `signedIdentity` field, a JSON object, specifies what image identity the signature claims about the image.
|
||||
One of the following alternatives is supported:
|
||||
|
||||
- The identity in the signature must exactly match the image identity. Note that with this, referencing an image by digest (with a signature claiming a _repository_`:`_tag_ identity) will fail.
|
||||
|
||||
```json
|
||||
{"type":"matchExact"}
|
||||
```
|
||||
- If the image identity carries a tag, the identity in the signature must exactly match;
|
||||
if the image identity uses a digest reference, the identity in the signature must be in the same repository as the image identity (using any tag).
|
||||
|
||||
(Note that with images identified using digest references, the digest from the reference is validated even before signature verification starts.)
|
||||
|
||||
```json
|
||||
{"type":"matchRepoDigestOrExact"}
|
||||
```
|
||||
- The identity in the signature must be in the same repository as the image identity. This is useful e.g. to pull an image using the `:latest` tag when the image is signed with a tag specifying an exact image version.
|
||||
|
||||
```json
|
||||
{"type":"matchRepository"}
|
||||
```
|
||||
- The identity in the signature must exactly match a specified identity.
|
||||
This is useful e.g. when locally mirroring images signed using their public identity.
|
||||
|
||||
```js
|
||||
{
|
||||
"type": "exactReference",
|
||||
"dockerReference": docker_reference_value
|
||||
}
|
||||
```
|
||||
- The identity in the signature must be in the same repository as a specified identity.
|
||||
This combines the properties of `matchRepository` and `exactReference`.
|
||||
|
||||
```js
|
||||
{
|
||||
"type": "exactRepository",
|
||||
"dockerRepository": docker_repository_value
|
||||
}
|
||||
```
|
||||
|
||||
If the `signedIdentity` field is missing, it is treated as `matchRepoDigestOrExact`.
|
||||
|
||||
*Note*: `matchExact`, `matchRepoDigestOrExact` and `matchRepository` can be only used if a Docker-like image identity is
|
||||
provided by the transport. In particular, the `dir:` and `oci:` transports can be only
|
||||
used with `exactReference` or `exactRepository`.
|
||||
|
||||
<!-- ### `signedBaseLayer` -->
|
||||
|
||||
## Examples
|
||||
|
||||
It is *strongly* recommended to set the `default` policy to `reject`, and then
|
||||
selectively allow individual transports and scopes as desired.
|
||||
|
||||
### A reasonably locked-down system
|
||||
|
||||
(Note that the `/*`…`*/` comments are not valid in JSON, and must not be used in real policies.)
|
||||
|
||||
```js
|
||||
{
|
||||
"default": [{"type": "reject"}], /* Reject anything not explicitly allowed */
|
||||
"transports": {
|
||||
"docker": {
|
||||
/* Allow installing images from a specific repository namespace, without cryptographic verification.
|
||||
This namespace includes images like openshift/hello-openshift and openshift/origin. */
|
||||
"docker.io/openshift": [{"type": "insecureAcceptAnything"}],
|
||||
/* Similarly, allow installing the “official” busybox images. Note how the fully expanded
|
||||
form, with the explicit /library/, must be used. */
|
||||
"docker.io/library/busybox": [{"type": "insecureAcceptAnything"}]
|
||||
/* Other docker: images use the global default policy and are rejected */
|
||||
},
|
||||
"dir": {
|
||||
"": [{"type": "insecureAcceptAnything"}] /* Allow any images originating in local directories */
|
||||
},
|
||||
"atomic": {
|
||||
/* The common case: using a known key for a repository or set of repositories */
|
||||
"hostname:5000/myns/official": [
|
||||
{
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "/path/to/official-pubkey.gpg"
|
||||
}
|
||||
],
|
||||
/* A more complex example, for a repository which contains a mirror of a third-party product,
|
||||
which must be signed-off by local IT */
|
||||
"hostname:5000/vendor/product": [
|
||||
{ /* Require the image to be signed by the original vendor, using the vendor's repository location. */
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "/path/to/vendor-pubkey.gpg",
|
||||
"signedIdentity": {
|
||||
"type": "exactRepository",
|
||||
"dockerRepository": "vendor-hostname/product/repository"
|
||||
}
|
||||
},
|
||||
{ /* Require the image to _also_ be signed by a local reviewer. */
|
||||
"type": "signedBy",
|
||||
"keyType": "GPGKeys",
|
||||
"keyPath": "/path/to/reviewer-pubkey.gpg"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Completely disable security, allow all images, do not trust any signatures
|
||||
|
||||
```json
|
||||
{
|
||||
"default": [{"type": "insecureAcceptAnything"}]
|
||||
}
|
||||
```
|
||||
## SEE ALSO
|
||||
atomic(1)
|
||||
|
||||
## HISTORY
|
||||
September 2016, Originally compiled by Miloslav Trmač <mitr@redhat.com>
|
||||
50 vendor/github.com/containers/image/docs/registries.conf.5.md (generated, vendored)
@@ -1,50 +0,0 @@
|
||||
% registries.conf(5) System-wide registry configuration file
|
||||
% Brent Baude
|
||||
% Aug 2017
|
||||
|
||||
# NAME
|
||||
registries.conf - Syntax of System Registry Configuration File
|
||||
|
||||
# DESCRIPTION
|
||||
The REGISTRIES configuration file is a system-wide configuration file for container image
|
||||
registries. The file format is TOML. The valid categories are: 'registries.search',
|
||||
'registries.insecure', and 'registries.block'.
|
||||
|
||||
# FORMAT
|
||||
The TOML_format is used to build a simple list format for registries under three
|
||||
categories: `registries.search`, `registries.insecure`, and `registries.block`.
|
||||
You can list multiple registries using a comma separated list.
|
||||
|
||||
Search registries are used when the caller of a container runtime does not fully specify the
|
||||
container image that they want to execute. These registries are prepended onto the front
|
||||
of the specified container image until the named image is found at a registry.
|
||||
|
||||
Insecure Registries. By default container runtimes use TLS when retrieving images
|
||||
from a registry. If the registry is not setup with TLS, then the container runtime
|
||||
will fail to pull images from the registry. If you add the registry to the list of
|
||||
insecure registries then the container runtime will attempt to use standard web protocols to
|
||||
pull the image. It also allows you to pull from a registry with self-signed certificates.
|
||||
Note insecure registries can be used for any registry, not just the registries listed
|
||||
under search.
|
||||
|
||||
Block Registries. The registries in this category are not pulled from when
|
||||
retrieving images.
|
||||
|
||||
# EXAMPLE
|
||||
The following example configuration defines two searchable registries, one
|
||||
insecure registry, and two blocked registries.
|
||||
|
||||
```
|
||||
[registries.search]
|
||||
registries = ['registry1.com', 'registry2.com']
|
||||
|
||||
[registries.insecure]
|
||||
registries = ['registry3.com']
|
||||
|
||||
[registries.block]
|
||||
registries = ['registry.untrusted.com', 'registry.unsafe.com']
|
||||
```
|
||||
|
||||
# HISTORY
|
||||
Aug 2017, Originally compiled by Brent Baude <bbaude@redhat.com>
|
||||
Jun 2018, Updated by Tom Sweeney <tsweeney@redhat.com>
|
||||
124 vendor/github.com/containers/image/docs/registries.d.md (generated, vendored)
@@ -1,124 +0,0 @@
|
||||
% REGISTRIES.D(5) Registries.d Man Page
|
||||
% Miloslav Trmač
|
||||
% August 2016
|
||||
# Registries Configuration Directory
|
||||
|
||||
The registries configuration directory contains configuration for various registries
|
||||
(servers storing remote container images), and for content stored in them,
|
||||
so that the configuration does not have to be provided in command-line options over and over for every command,
|
||||
and so that it can be shared by all users of containers/image.
|
||||
|
||||
By default (unless overridden at compile-time), the registries configuration directory is `/etc/containers/registries.d`;
|
||||
applications may allow using a different directory instead.
|
||||
|
||||
## Directory Structure
|
||||
|
||||
The directory may contain any number of files with the extension `.yaml`,
|
||||
each using the YAML format. Other than the mandatory extension, names of the files
|
||||
don’t matter.
|
||||
|
||||
The contents of these files are merged together; to have a well-defined and easy to understand
|
||||
behavior, there can be only one configuration section describing a single namespace within a registry
|
||||
(in particular there can be at most one `default-docker` section across all files,
and there can be at most one instance of any key under the `docker` section;
|
||||
these sections are documented later).
|
||||
|
||||
Thus, it is forbidden to have two conflicting configurations for a single registry or scope,
|
||||
and it is also forbidden to split a configuration for a single registry or scope across
|
||||
more than one file (even if they are not semantically in conflict).
|
||||
|
||||
## Registries, Scopes and Search Order
|
||||
|
||||
Each YAML file must contain a “YAML mapping” (key-value pairs). Two top-level keys are defined:
|
||||
|
||||
- `default-docker` is the _configuration section_ (as documented below)
|
||||
for registries implementing "Docker Registry HTTP API V2".
|
||||
|
||||
This key is optional.
|
||||
|
||||
- `docker` is a mapping, using individual registries implementing "Docker Registry HTTP API V2",
|
||||
or namespaces and individual images within these registries, as keys;
|
||||
the value assigned to any such key is a _configuration section_.
|
||||
|
||||
This key is optional.
|
||||
|
||||
Scopes matching individual images are named Docker references *in the fully expanded form*, either
|
||||
using a tag or digest. For example, `docker.io/library/busybox:latest` (*not* `busybox:latest`).
|
||||
|
||||
More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest),
|
||||
a repository namespace, or a registry host (and a port if it differs from the default).
|
||||
|
||||
Note that if a registry is accessed using a hostname+port configuration, the port-less hostname
|
||||
is _not_ used as parent scope.
|
||||
|
||||
When searching for a configuration to apply for an individual container image, only
|
||||
the configuration for the most-precisely matching scope is used; configuration using
|
||||
more general scopes is ignored. For example, if _any_ configuration exists for
|
||||
`docker.io/library/busybox`, the configuration for `docker.io` is ignored
|
||||
(even if some element of the configuration is defined for `docker.io` and not for `docker.io/library/busybox`).
|
||||
|
||||
## Individual Configuration Sections
|
||||
|
||||
A single configuration section is selected for a container image using the process
|
||||
described above. The configuration section is a YAML mapping, with the following keys:
|
||||
|
||||
- `sigstore-staging` defines a URL of the signature storage, used for editing it (adding or deleting signatures).
|
||||
|
||||
This key is optional; if it is missing, `sigstore` below is used.
|
||||
|
||||
- `sigstore` defines a URL of the signature storage.
|
||||
This URL is used for reading existing signatures,
|
||||
and if `sigstore-staging` does not exist, also for adding or removing them.
|
||||
|
||||
This key is optional; if it is missing, no signature storage is defined (no signatures
|
||||
are downloaded along with images; adding new signatures is possible only if `sigstore-staging` is defined).
|
||||
|
||||
## Examples
|
||||
|
||||
### Using Containers from Various Origins
|
||||
|
||||
The following demonstrates how to consume and run images from various registries and namespaces:
|
||||
|
||||
```yaml
|
||||
docker:
|
||||
registry.database-supplier.com:
|
||||
sigstore: https://sigstore.database-supplier.com
|
||||
distribution.great-middleware.org:
|
||||
sigstore: https://security-team.great-middleware.org/sigstore
|
||||
docker.io/web-framework:
|
||||
sigstore: https://sigstore.web-framework.io:8080
|
||||
```
|
||||
|
||||
### Developing and Signing Containers, Staging Signatures
|
||||
|
||||
For developers in `example.com`:
|
||||
|
||||
- Consume most container images using the public servers also used by clients.
|
||||
- Use a separate signature storage for container images in a namespace corresponding to the developers' department, with a staging storage used before publishing signatures.
|
||||
- Craft an individual exception for a single branch a specific developer is working on locally.
|
||||
|
||||
```yaml
|
||||
docker:
|
||||
registry.example.com:
|
||||
sigstore: https://registry-sigstore.example.com
|
||||
registry.example.com/mydepartment:
|
||||
sigstore: https://sigstore.mydepartment.example.com
|
||||
sigstore-staging: file:///mnt/mydepartment/sigstore-staging
|
||||
registry.example.com/mydepartment/myproject:mybranch:
|
||||
sigstore: http://localhost:4242/sigstore
|
||||
sigstore-staging: file:///home/useraccount/webroot/sigstore
|
||||
```
|
||||
|
||||
### A Global Default
|
||||
|
||||
If a company publishes its products using a different domain, and different registry hostname for each of them, it is still possible to use a single signature storage server
|
||||
without listing each domain individually. This is expected to rarely happen, usually only for staging new signatures.
|
||||
|
||||
```yaml
|
||||
default-docker:
|
||||
sigstore-staging: file:///mnt/company/common-sigstore-staging
|
||||
```
|
||||
|
||||
# AUTHORS
|
||||
|
||||
Miloslav Trmač <mitr@redhat.com>
|
||||
136 vendor/github.com/containers/image/docs/signature-protocols.md (generated, vendored)
@@ -1,136 +0,0 @@
|
||||
# Signature access protocols
|
||||
|
||||
The `github.com/containers/image` library supports signatures implemented as blobs “attached to” an image.
|
||||
Some image transports (local storage formats and remote protocols) implement these signatures natively
|
||||
or trivially; for others, the protocol extensions described below are necessary.
|
||||
|
||||
## docker/distribution registries—separate storage
|
||||
|
||||
### Usage
|
||||
|
||||
Any existing docker/distribution registry, whether or not it natively supports signatures,
|
||||
can be augmented with separate signature storage by configuring a signature storage URL in [`registries.d`](registries.d.md).
|
||||
`registries.d` can be configured to use one storage URL for a whole docker/distribution server,
|
||||
or also separate URLs for smaller namespaces or individual repositories within the server
|
||||
(which e.g. allows image authors to manage their own signature storage while publishing
|
||||
the images on the public `docker.io` server).
|
||||
|
||||
The signature storage URL defines a root of a path hierarchy.
|
||||
It can be either a `file:///…` URL, pointing to a local directory structure,
|
||||
or a `http`/`https` URL, pointing to a remote server.
|
||||
`file:///` signature storage can be both read and written, `http`/`https` only supports reading.
|
||||
|
||||
The same path hierarchy is used in both cases, so the HTTP/HTTPS server can be
|
||||
a simple static web server serving a directory structure created by writing to a `file:///` signature storage.
|
||||
(This of course does not prevent other server implementations,
|
||||
e.g. a HTTP server reading signatures from a database.)
|
||||
|
||||
The usual workflow for producing and distributing images using the separate storage mechanism
|
||||
is to configure the repository in `registries.d` with `sigstore-staging` URL pointing to a private
|
||||
`file:///` staging area, and a `sigstore` URL pointing to a public web server.
|
||||
To publish an image, the image author would sign the image as necessary (e.g. using `skopeo copy`),
|
||||
and then copy the created directory structure from the `file:///` staging area
|
||||
to a subdirectory of a webroot of the public web server so that they are accessible using the public `sigstore` URL.
|
||||
The author would also instruct consumers of the image to, or provide a `registries.d` configuration file to,
|
||||
set up a `sigstore` URL pointing to the public web server.
|
||||
|
||||
### Path structure
|
||||
|
||||
Given a _base_ signature storage URL configured in `registries.d` as mentioned above,
|
||||
and a container image stored in a docker/distribution registry using the _fully-expanded_ name
|
||||
_hostname_`/`_namespaces_`/`_name_{`@`_digest_,`:`_tag_} (e.g. for `docker.io/library/busybox:latest`,
|
||||
_namespaces_ is `library`, even if the user refers to the image using the shorter syntax as `busybox:latest`),
|
||||
signatures are accessed using URLs of the form
|
||||
> _base_`/`_namespaces_`/`_name_`@`_digest-algo_`=`_digest-value_`/signature-`_index_
|
||||
|
||||
where _digest-algo_`:`_digest-value_ is a manifest digest usable for referencing the relevant image manifest
|
||||
(i.e. even if the user referenced the image using a tag,
|
||||
the signature storage is always disambiguated using digest references).
|
||||
Note that in the URLs used for signatures,
|
||||
_digest-algo_ and _digest-value_ are separated using the `=` character,
|
||||
not `:` like when accessing the manifest using the docker/distribution API.
|
||||
|
||||
Within the URL, _index_ is a decimal integer (in the canonical form), starting with 1.
|
||||
Signatures are stored at URLs with successive _index_ values; to read all of them, start with _index_=1,
|
||||
and continue reading signatures and increasing _index_ as long as signatures with these _index_ values exist.
|
||||
Similarly, to add one more signature to an image, find the first _index_ which does not exist, and
|
||||
then store the new signature using that _index_ value.
|
||||
|
||||
There is no way to list existing signatures other than iterating through the successive _index_ values,
|
||||
and no way to download all of the signatures at once.
|
||||
|
||||
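A small sketch of the path construction described above, as pure string handling under the stated convention (base URL, fully-expanded repository path, digest with `=` instead of `:`, 1-based index); the helper name is invented for illustration:

```go
// signatureURL builds the URL of the index-th signature for the image
// identified by repoPath and dgst under the given sigstore base URL.
// Assumes: "fmt", "strings" and "github.com/opencontainers/go-digest" are imported.
func signatureURL(base, repoPath string, dgst digest.Digest, index int) string {
	return fmt.Sprintf("%s/%s@%s=%s/signature-%d",
		strings.TrimSuffix(base, "/"), repoPath,
		dgst.Algorithm(), dgst.Hex(), index)
}
```

For instance, `signatureURL("https://example.com/sigstore", "library/busybox", dgst, 1)` produces the first URL in the examples below.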
### Examples
|
||||
|
||||
For a docker/distribution image available as `busybox@sha256:817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e`
|
||||
(or as `busybox:latest` if the `latest` tag points to a manifest with the same digest),
|
||||
and with a `registries.d` configuration specifying a `sigstore` URL `https://example.com/sigstore` for the same image,
|
||||
the following URLs would be accessed to download all signatures:
|
||||
> - `https://example.com/sigstore/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-1`
|
||||
> - `https://example.com/sigstore/library/busybox@sha256=817a12c32a39bbe394944ba49de563e085f1d3c5266eb8e9723256bc4448680e/signature-2`
|
||||
> - …
|
||||
|
||||
For a docker/distribution image available as `example.com/ns1/ns2/ns3/repo@somedigest:digestvalue` and the same
|
||||
`sigstore` URL, the signatures would be available at
|
||||
> `https://example.com/sigstore/ns1/ns2/ns3/repo@somedigest=digestvalue/signature-1`
|
||||
|
||||
and so on.
|
||||
|
||||
## (OpenShift) docker/distribution API extension
|
||||
|
||||
As of https://github.com/openshift/origin/pull/12504/ , the OpenShift-embedded registry also provides
|
||||
an extension of the docker/distribution API which allows simpler access to the signatures,
|
||||
using only the docker/distribution API endpoint.
|
||||
|
||||
This API is not inherently OpenShift-specific (e.g. the client does not need to know the OpenShift API endpoint,
|
||||
and credentials sufficient to access the docker/distribution API server are sufficient to access signatures as well),
|
||||
and it is the preferred way to implement signature storage in registries.
|
||||
|
||||
See https://github.com/openshift/openshift-docs/pull/3556 for the upstream documentation of the API.
|
||||
|
||||
To read the signature, any user with access to an image can use the `/extensions/v2/…/signatures/…`
|
||||
path to read an array of signatures. Use only the signature objects
|
||||
which have `version` equal to `2`, `type` equal to `atomic`, and read the signature from `content`;
|
||||
ignore the other fields of the signature object.
|
||||
|
||||
To add a single signature, `PUT` a new object with `version` set to `2`, `type` set to `atomic`,
|
||||
and `content` set to the signature. Also set `name` to a unique name with the form
|
||||
_digest_`@`_per-image-name_, where _digest_ is an image manifest digest (also used in the URL),
|
||||
and _per-image-name_ is any unique identifier.
|
||||
|
||||
To add more than one signature, add them one at a time. This API does not allow deleting signatures.
|
||||
|
||||
Note that because signatures are stored within the cluster-wide image objects,
|
||||
i.e. different namespaces can not associate different sets of signatures to the same image,
|
||||
updating signatures requires a cluster-wide access to the `imagesignatures` resource
|
||||
(by default available to the `system:image-signer` role).
|
||||
|
||||
## OpenShift-embedded registries
|
||||
|
||||
The OpenShift-embedded registry implements the ordinary docker/distribution API,
|
||||
and it also exposes images through the OpenShift REST API (available through the “API master” servers).
|
||||
|
||||
Note: OpenShift versions 1.5 and later support the above-described [docker/distribution API extension](#openshift-dockerdistribution-api-extension),
|
||||
which is easier to set up and should usually be preferred.
|
||||
Continue reading for details on using older versions of OpenShift.
|
||||
|
||||
As of https://github.com/openshift/origin/pull/9181,
|
||||
signatures are exposed through the OpenShift API
|
||||
(i.e. to access the complete image, it is necessary to use both APIs,
|
||||
in particular to know the URLs for both the docker/distribution and the OpenShift API master endpoints).
|
||||
|
||||
To read the signature, any user with access to an image can use the `imagestreamimages` namespaced
resource to read an `Image` object and its `Signatures` array. Use only the `ImageSignature` objects
which have `Type` equal to `atomic`, and read the signature from `Content`; ignore the other fields of
the `ImageSignature` object.

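As a minimal sketch of this read side, filtering the usable signatures out of an `Image` object that has already been fetched through `imagestreamimages` could look like the following; the lower-case JSON field names, and treating `Content` as base64-encoded bytes, are assumptions rather than something stated above.

```go
package signatures

import "encoding/json"

// imageSignature mirrors only the ImageSignature fields the text above tells
// us to use; everything else is ignored.
type imageSignature struct {
	Type    string `json:"type"`
	Content []byte `json:"content"` // assumed to arrive base64-encoded in JSON
}

// image mirrors the Signatures array of the OpenShift Image object.
type image struct {
	Signatures []imageSignature `json:"signatures"`
}

// atomicSignatures returns the contents of all signatures with Type "atomic"
// from an Image object that has already been read via the imagestreamimages
// resource (fetching it is out of scope for this sketch).
func atomicSignatures(imageJSON []byte) ([][]byte, error) {
	var img image
	if err := json.Unmarshal(imageJSON, &img); err != nil {
		return nil, err
	}
	var sigs [][]byte
	for _, s := range img.Signatures {
		if s.Type == "atomic" {
			sigs = append(sigs, s.Content)
		}
	}
	return sigs, nil
}
```
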
To add or remove signatures, use the cluster-wide (non-namespaced) `imagesignatures` resource,
with `Type` set to `atomic` and `Content` set to the signature. Signature names must have the form
_digest_`@`_per-image-name_, where _digest_ is an image manifest digest (OpenShift “image name”),
and _per-image-name_ is any unique identifier.

Note that because signatures are stored within the cluster-wide image objects
(i.e. different namespaces can not associate different sets of signatures with the same image),
updating signatures requires cluster-wide access to the `imagesignatures` resource
(by default available to the `system:image-signer` role),
and deleting signatures is strongly discouraged
(it deletes the signature from all namespaces which contain the same image).

7 vendor/github.com/containers/image/image/docker_schema2.go (generated, vendored)
@@ -11,6 +11,7 @@ import (
|
||||
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/containers/image/manifest"
|
||||
"github.com/containers/image/pkg/blobinfocache"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
@@ -95,7 +96,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
|
||||
if m.src == nil {
|
||||
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
|
||||
}
|
||||
stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor))
|
||||
stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), blobinfocache.NoCache)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -249,7 +250,9 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ
|
||||
if historyEntry.EmptyLayer {
|
||||
if !haveGzippedEmptyLayer {
|
||||
logrus.Debugf("Uploading empty layer during conversion to schema 1")
|
||||
info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, false)
|
||||
// Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
|
||||
// and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
|
||||
info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, blobinfocache.NoCache, false)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Error uploading empty layer")
|
||||
}
|
||||
|
||||
3 vendor/github.com/containers/image/image/oci.go (generated, vendored)
@@ -7,6 +7,7 @@ import (
|
||||
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/containers/image/manifest"
|
||||
"github.com/containers/image/pkg/blobinfocache"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
@@ -60,7 +61,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
|
||||
if m.src == nil {
|
||||
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
|
||||
}
|
||||
stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config))
|
||||
stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), blobinfocache.NoCache)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
2 vendor/github.com/containers/image/manifest/docker_schema1.go (generated, vendored)
@@ -275,7 +275,7 @@ func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) {
|
||||
raw := make(map[string]*json.RawMessage)
|
||||
err = json.Unmarshal(config, &raw)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error re-decoding compat image config %#v: %v", s1)
|
||||
return nil, errors.Wrapf(err, "error re-decoding compat image config %#v", s1)
|
||||
}
|
||||
// Drop some fields.
|
||||
delete(raw, "id")
|
||||
|
||||
5 vendor/github.com/containers/image/manifest/docker_schema2.go (generated, vendored)
@@ -57,8 +57,9 @@ type Schema2HealthConfig struct {
|
||||
Test []string `json:",omitempty"`
|
||||
|
||||
// Zero means to inherit. Durations are expressed as integer nanoseconds.
|
||||
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
|
||||
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
|
||||
StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check.
|
||||
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
|
||||
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
|
||||
|
||||
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
|
||||
// Zero means inherit.
|
||||
|
||||
34 vendor/github.com/containers/image/oci/archive/oci_dest.go (generated, vendored)
@@ -25,7 +25,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref ociA
|
||||
unpackedDest, err := tempDirRef.ociRefExtracted.NewImageDestination(ctx, sys)
|
||||
if err != nil {
|
||||
if err := tempDirRef.deleteTempDir(); err != nil {
|
||||
return nil, errors.Wrapf(err, "error deleting temp directory", tempDirRef.tempDirectory)
|
||||
return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
@@ -77,20 +77,32 @@ func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool {
|
||||
return d.unpackedDest.IgnoresEmbeddedDockerReference()
|
||||
}
|
||||
|
||||
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
|
||||
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
|
||||
func (d *ociArchiveImageDestination) HasThreadSafePutBlob() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// PutBlob writes contents of stream and returns data representing the result.
|
||||
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
|
||||
// inputInfo.Size is the expected length of stream, if known.
|
||||
func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
|
||||
return d.unpackedDest.PutBlob(ctx, stream, inputInfo, isConfig)
|
||||
// inputInfo.MediaType describes the blob format, if known.
|
||||
// May update cache.
|
||||
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||
// to any other readers for download using the supplied digest.
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
|
||||
return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig)
|
||||
}
|
||||
|
||||
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob
|
||||
func (d *ociArchiveImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
|
||||
return d.unpackedDest.HasBlob(ctx, info)
|
||||
}
|
||||
|
||||
func (d *ociArchiveImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
|
||||
return d.unpackedDest.ReapplyBlob(ctx, info)
|
||||
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
|
||||
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
|
||||
// info.Digest must not be empty.
|
||||
// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
|
||||
// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
|
||||
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
|
||||
// May use and/or update cache.
|
||||
func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
|
||||
return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute)
|
||||
}
|
||||
|
||||
// PutManifest writes manifest to the destination
|
||||
|
||||
15 vendor/github.com/containers/image/oci/archive/oci_src.go (generated, vendored)
@@ -28,7 +28,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref ociArchiv
|
||||
unpackedSrc, err := tempDirRef.ociRefExtracted.NewImageSource(ctx, sys)
|
||||
if err != nil {
|
||||
if err := tempDirRef.deleteTempDir(); err != nil {
|
||||
return nil, errors.Wrapf(err, "error deleting temp directory", tempDirRef.tempDirectory)
|
||||
return nil, errors.Wrapf(err, "error deleting temp directory %q", tempDirRef.tempDirectory)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
@@ -76,9 +76,16 @@ func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest
|
||||
return s.unpackedSrc.GetManifest(ctx, instanceDigest)
|
||||
}
|
||||
|
||||
// GetBlob returns a stream for the specified blob, and the blob's size.
|
||||
func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
|
||||
return s.unpackedSrc.GetBlob(ctx, info)
|
||||
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
|
||||
func (s *ociArchiveImageSource) HasThreadSafeGetBlob() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
|
||||
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
|
||||
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
|
||||
func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
|
||||
return s.unpackedSrc.GetBlob(ctx, info, cache)
|
||||
}
|
||||
|
||||
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
|
||||
|
||||
38 vendor/github.com/containers/image/oci/layout/oci_dest.go (generated, vendored)
@@ -107,13 +107,20 @@ func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool {
|
||||
return false // N/A, DockerReference() returns nil.
|
||||
}
|
||||
|
||||
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
|
||||
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
|
||||
func (d *ociImageDestination) HasThreadSafePutBlob() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// PutBlob writes contents of stream and returns data representing the result.
|
||||
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
|
||||
// inputInfo.Size is the expected length of stream, if known.
|
||||
// inputInfo.MediaType describes the blob format, if known.
|
||||
// May update cache.
|
||||
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||
// to any other readers for download using the supplied digest.
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
|
||||
func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
|
||||
blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
@@ -173,30 +180,29 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
|
||||
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
|
||||
}
|
||||
|
||||
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
|
||||
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
|
||||
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
|
||||
// it returns a non-nil error only on an unexpected failure.
|
||||
func (d *ociImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
|
||||
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
|
||||
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
|
||||
// info.Digest must not be empty.
|
||||
// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
|
||||
// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
|
||||
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
|
||||
// May use and/or update cache.
|
||||
func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
|
||||
if info.Digest == "" {
|
||||
return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
|
||||
return false, types.BlobInfo{}, errors.Errorf(`"Can not check for a blob with unknown digest`)
|
||||
}
|
||||
blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
|
||||
if err != nil {
|
||||
return false, -1, err
|
||||
return false, types.BlobInfo{}, err
|
||||
}
|
||||
finfo, err := os.Stat(blobPath)
|
||||
if err != nil && os.IsNotExist(err) {
|
||||
return false, -1, nil
|
||||
return false, types.BlobInfo{}, nil
|
||||
}
|
||||
if err != nil {
|
||||
return false, -1, err
|
||||
return false, types.BlobInfo{}, err
|
||||
}
|
||||
return true, finfo.Size(), nil
|
||||
}
|
||||
|
||||
func (d *ociImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
|
||||
return info, nil
|
||||
return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
|
||||
}
|
||||
|
||||
// PutManifest writes manifest to the destination.
|
||||
|
||||
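
The destination-side diffs above all follow the same pattern: `PutBlob` gains a `types.BlobInfoCache` parameter, and `HasBlob`/`ReapplyBlob` are replaced by `TryReusingBlob`. As a rough caller-side sketch of the new flow (the helper name and the surrounding copy logic are assumptions for illustration, not code from this change):

```go
package copysketch

import (
	"context"
	"io"

	"github.com/containers/image/types"
)

// copyLayer is a hypothetical helper showing how a caller drives the new
// cache-aware destination interface: first try to reuse an existing blob,
// and only upload if that fails.
func copyLayer(ctx context.Context, dest types.ImageDestination, layer io.Reader,
	info types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
	// TryReusingBlob may substitute an equivalent blob (canSubstitute=true),
	// so the returned BlobInfo can differ from the requested one.
	reused, reusedInfo, err := dest.TryReusingBlob(ctx, info, cache, true)
	if err != nil {
		return types.BlobInfo{}, err
	}
	if reused {
		return reusedInfo, nil
	}
	// Not reusable: upload the blob; the destination may record its new
	// location in the cache for later reuse.
	return dest.PutBlob(ctx, layer, info, cache, false)
}
```
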
11 vendor/github.com/containers/image/oci/layout/oci_src.go (generated, vendored)
@@ -92,8 +92,15 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
|
||||
return m, mimeType, nil
|
||||
}
|
||||
|
||||
// GetBlob returns a stream for the specified blob, and the blob's size.
|
||||
func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
|
||||
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
|
||||
func (s *ociImageSource) HasThreadSafeGetBlob() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
|
||||
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
|
||||
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
|
||||
func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
|
||||
if len(info.URLs) != 0 {
|
||||
return s.getExternalBlob(ctx, info.URLs)
|
||||
}
|
||||
|
||||
44 vendor/github.com/containers/image/openshift/openshift.go (generated, vendored)
@@ -96,7 +96,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
}
|
||||
|
||||
logrus.Debugf("%s %s", method, url)
|
||||
logrus.Debugf("%s %s", method, url.String())
|
||||
res, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -127,7 +127,7 @@ func (c *openshiftClient) doRequest(ctx context.Context, method, path string, re
|
||||
if statusValid {
|
||||
return nil, errors.New(status.Message)
|
||||
}
|
||||
return nil, errors.Errorf("HTTP error: status code: %d, body: %s", res.StatusCode, string(body))
|
||||
return nil, errors.Errorf("HTTP error: status code: %d (%s), body: %s", res.StatusCode, http.StatusText(res.StatusCode), string(body))
|
||||
}
|
||||
|
||||
return body, nil
|
||||
@@ -211,12 +211,19 @@ func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *
|
||||
return s.docker.GetManifest(ctx, instanceDigest)
|
||||
}
|
||||
|
||||
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
|
||||
func (s *openshiftImageSource) HasThreadSafeGetBlob() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
|
||||
func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
|
||||
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
|
||||
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
|
||||
func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
|
||||
if err := s.ensureImageIsResolved(ctx); err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
return s.docker.GetBlob(ctx, info)
|
||||
return s.docker.GetBlob(ctx, info, cache)
|
||||
}
|
||||
|
||||
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
|
||||
@@ -376,26 +383,31 @@ func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
|
||||
return d.docker.IgnoresEmbeddedDockerReference()
|
||||
}
|
||||
|
||||
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
|
||||
func (d *openshiftImageDestination) HasThreadSafePutBlob() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
|
||||
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
|
||||
// inputInfo.Size is the expected length of stream, if known.
|
||||
// May update cache.
|
||||
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||
// to any other readers for download using the supplied digest.
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
|
||||
return d.docker.PutBlob(ctx, stream, inputInfo, isConfig)
|
||||
func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
|
||||
return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig)
|
||||
}
|
||||
|
||||
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
|
||||
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
|
||||
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
|
||||
// it returns a non-nil error only on an unexpected failure.
|
||||
func (d *openshiftImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
|
||||
return d.docker.HasBlob(ctx, info)
|
||||
}
|
||||
|
||||
func (d *openshiftImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
|
||||
return d.docker.ReapplyBlob(ctx, info)
|
||||
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
|
||||
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
|
||||
// info.Digest must not be empty.
|
||||
// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
|
||||
// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
|
||||
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
|
||||
// May use and/or update cache.
|
||||
func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
|
||||
return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute)
|
||||
}
|
||||
|
||||
// PutManifest writes manifest to the destination.
|
||||
|
||||
49 vendor/github.com/containers/image/ostree/ostree_dest.go (generated, vendored)
@@ -4,7 +4,6 @@ package ostree
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
@@ -24,6 +23,7 @@ import (
|
||||
"github.com/containers/image/manifest"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
"github.com/klauspost/pgzip"
|
||||
"github.com/opencontainers/go-digest"
|
||||
selinux "github.com/opencontainers/selinux/go-selinux"
|
||||
"github.com/ostreedev/ostree-go/pkg/otbuiltin"
|
||||
@@ -132,7 +132,20 @@ func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool {
|
||||
return false // N/A, DockerReference() returns nil.
|
||||
}
|
||||
|
||||
func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
|
||||
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
|
||||
func (d *ostreeImageDestination) HasThreadSafePutBlob() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// PutBlob writes contents of stream and returns data representing the result.
|
||||
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
|
||||
// inputInfo.Size is the expected length of stream, if known.
|
||||
// inputInfo.MediaType describes the blob format, if known.
|
||||
// May update cache.
|
||||
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||
// to any other readers for download using the supplied digest.
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
|
||||
tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
@@ -241,7 +254,7 @@ func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch strin
|
||||
}
|
||||
|
||||
func generateTarSplitMetadata(output *bytes.Buffer, file string) (digest.Digest, int64, error) {
|
||||
mfz := gzip.NewWriter(output)
|
||||
mfz := pgzip.NewWriter(output)
|
||||
defer mfz.Close()
|
||||
metaPacker := storage.NewJSONPacker(mfz)
|
||||
|
||||
@@ -322,12 +335,18 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
|
||||
return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
|
||||
}
|
||||
|
||||
func (d *ostreeImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
|
||||
|
||||
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
|
||||
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
|
||||
// info.Digest must not be empty.
|
||||
// If canSubstitute, TryReusingBlob can use an equivalent equivalent of the desired blob; in that case the returned info may not match the input.
|
||||
// If the blob has been succesfully reused, returns (true, info, nil); info must contain at least a digest and size.
|
||||
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
|
||||
// May use and/or update cache.
|
||||
func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
|
||||
if d.repo == nil {
|
||||
repo, err := openRepo(d.ref.repo)
|
||||
if err != nil {
|
||||
return false, 0, err
|
||||
return false, types.BlobInfo{}, err
|
||||
}
|
||||
d.repo = repo
|
||||
}
|
||||
@@ -335,29 +354,25 @@ func (d *ostreeImageDestination) HasBlob(ctx context.Context, info types.BlobInf
|
||||
|
||||
found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
|
||||
if err != nil || !found {
|
||||
return found, -1, err
|
||||
return found, types.BlobInfo{}, err
|
||||
}
|
||||
|
||||
found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
|
||||
if err != nil || !found {
|
||||
return found, -1, err
|
||||
return found, types.BlobInfo{}, err
|
||||
}
|
||||
|
||||
found, data, err = readMetadata(d.repo, branch, "docker.size")
|
||||
if err != nil || !found {
|
||||
return found, -1, err
|
||||
return found, types.BlobInfo{}, err
|
||||
}
|
||||
|
||||
size, err := strconv.ParseInt(data, 10, 64)
|
||||
if err != nil {
|
||||
return false, -1, err
|
||||
return false, types.BlobInfo{}, err
|
||||
}
|
||||
|
||||
return true, size, nil
|
||||
}
|
||||
|
||||
func (d *ostreeImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
|
||||
return info, nil
|
||||
return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
|
||||
}
|
||||
|
||||
// PutManifest writes manifest to the destination.
|
||||
@@ -467,7 +482,9 @@ func (d *ostreeImageDestination) Commit(ctx context.Context) error {
|
||||
metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)),
|
||||
fmt.Sprintf("signatures=%d", d.signaturesLen),
|
||||
fmt.Sprintf("docker.digest=%s", string(d.digest))}
|
||||
err = d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata)
|
||||
if err := d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = repo.CommitTransaction()
|
||||
return err
|
||||
|
||||
15 vendor/github.com/containers/image/ostree/ostree_src.go (generated, vendored)
@@ -4,7 +4,6 @@ package ostree
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
@@ -17,6 +16,7 @@ import (
|
||||
"github.com/containers/image/manifest"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage/pkg/ioutils"
|
||||
"github.com/klauspost/pgzip"
|
||||
"github.com/opencontainers/go-digest"
|
||||
glib "github.com/ostreedev/ostree-go/pkg/glibobject"
|
||||
"github.com/pkg/errors"
|
||||
@@ -255,8 +255,15 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser,
|
||||
return getter.Get(path)
|
||||
}
|
||||
|
||||
// GetBlob returns a stream for the specified blob, and the blob's size.
|
||||
func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
|
||||
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
|
||||
func (s *ostreeImageSource) HasThreadSafeGetBlob() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
|
||||
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
|
||||
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
|
||||
func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
|
||||
|
||||
blob := info.Digest.Hex()
|
||||
|
||||
@@ -302,7 +309,7 @@ func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (i
|
||||
}
|
||||
|
||||
mf := bytes.NewReader(tarsplit)
|
||||
mfz, err := gzip.NewReader(mf)
|
||||
mfz, err := pgzip.NewReader(mf)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
329 vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go (generated, vendored, new file)
@@ -0,0 +1,329 @@
|
||||
package blobinfocache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
// NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
|
||||
// we can simply start over with a different filename; update blobInfoCacheFilename.
|
||||
|
||||
// FIXME: For CRI-O, does this need to hide information between different users?
|
||||
|
||||
// uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
|
||||
uncompressedDigestBucket = []byte("uncompressedDigest")
|
||||
// digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
|
||||
// (as a set of key=digest, value="" pairs)
|
||||
digestByUncompressedBucket = []byte("digestByUncompressed")
|
||||
// knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing
|
||||
// a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value).
|
||||
knownLocationsBucket = []byte("knownLocations")
|
||||
)
|
||||
|
||||
// Concurrency:
|
||||
// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely
|
||||
// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time.
|
||||
|
||||
// pathLock contains a lock for a specific BoltDB database path.
|
||||
type pathLock struct {
|
||||
refCount int64 // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below!
|
||||
mutex sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database.
|
||||
}
|
||||
|
||||
var (
|
||||
// pathLocks contains a lock for each currently open file.
|
||||
// This must be global so that independently created instances of boltDBCache exclude each other.
|
||||
// The map is protected by pathLocksMutex.
|
||||
// FIXME? Should this be based on device:inode numbers instead of paths instead?
|
||||
pathLocks = map[string]*pathLock{}
|
||||
pathLocksMutex = sync.Mutex{}
|
||||
)
|
||||
|
||||
// lockPath obtains the pathLock for path.
|
||||
// The caller must call unlockPath eventually.
|
||||
func lockPath(path string) {
|
||||
pl := func() *pathLock { // A scope for defer
|
||||
pathLocksMutex.Lock()
|
||||
defer pathLocksMutex.Unlock()
|
||||
pl, ok := pathLocks[path]
|
||||
if ok {
|
||||
pl.refCount++
|
||||
} else {
|
||||
pl = &pathLock{refCount: 1, mutex: sync.Mutex{}}
|
||||
pathLocks[path] = pl
|
||||
}
|
||||
return pl
|
||||
}()
|
||||
pl.mutex.Lock()
|
||||
}
|
||||
|
||||
// unlockPath releases the pathLock for path.
|
||||
func unlockPath(path string) {
|
||||
pathLocksMutex.Lock()
|
||||
defer pathLocksMutex.Unlock()
|
||||
pl, ok := pathLocks[path]
|
||||
if !ok {
|
||||
// Should this return an error instead? BlobInfoCache ultimately ignores errors…
|
||||
panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path))
|
||||
}
|
||||
pl.mutex.Unlock()
|
||||
pl.refCount--
|
||||
if pl.refCount == 0 {
|
||||
delete(pathLocks, path)
|
||||
}
|
||||
}
|
||||
|
||||
// boltDBCache si a BlobInfoCache implementation which uses a BoltDB file at the specified path.
|
||||
//
|
||||
// Note that we don’t keep the database open across operations, because that would lock the file and block any other
|
||||
// users; instead, we need to open/close it for every single write or lookup.
|
||||
type boltDBCache struct {
|
||||
path string
|
||||
}
|
||||
|
||||
// NewBoltDBCache returns a BlobInfoCache implementation which uses a BoltDB file at path.
|
||||
// Most users should call DefaultCache instead.
|
||||
func NewBoltDBCache(path string) types.BlobInfoCache {
|
||||
return &boltDBCache{path: path}
|
||||
}
|
||||
|
||||
// view returns runs the specified fn within a read-only transaction on the database.
|
||||
func (bdc *boltDBCache) view(fn func(tx *bolt.Tx) error) (retErr error) {
|
||||
// bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
|
||||
// nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
|
||||
// a read lock, blocking any future writes.
|
||||
// Hence this preliminary check, which is RACY: Another process could remove the file
|
||||
// between the Lstat call and opening the database.
|
||||
if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
lockPath(bdc.path)
|
||||
defer unlockPath(bdc.path)
|
||||
db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err := db.Close(); retErr == nil && err != nil {
|
||||
retErr = err
|
||||
}
|
||||
}()
|
||||
|
||||
return db.View(fn)
|
||||
}
|
||||
|
||||
// update returns runs the specified fn within a read-write transaction on the database.
|
||||
func (bdc *boltDBCache) update(fn func(tx *bolt.Tx) error) (retErr error) {
|
||||
lockPath(bdc.path)
|
||||
defer unlockPath(bdc.path)
|
||||
db, err := bolt.Open(bdc.path, 0600, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err := db.Close(); retErr == nil && err != nil {
|
||||
retErr = err
|
||||
}
|
||||
}()
|
||||
|
||||
return db.Update(fn)
|
||||
}
|
||||
|
||||
// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
|
||||
func (bdc *boltDBCache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
|
||||
if b := tx.Bucket(uncompressedDigestBucket); b != nil {
|
||||
if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
|
||||
d, err := digest.Parse(string(uncompressedBytes))
|
||||
if err == nil {
|
||||
return d
|
||||
}
|
||||
// FIXME? Log err (but throttle the log volume on repeated accesses)?
|
||||
}
|
||||
}
|
||||
// Presence in digestsByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest.
|
||||
// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
|
||||
// when we already record a (compressed, uncompressed) pair.
|
||||
if b := tx.Bucket(digestByUncompressedBucket); b != nil {
|
||||
if b = b.Bucket([]byte(anyDigest.String())); b != nil {
|
||||
c := b.Cursor()
|
||||
if k, _ := c.First(); k != nil { // The bucket is non-empty
|
||||
return anyDigest
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
|
||||
// May return anyDigest if it is known to be uncompressed.
|
||||
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
|
||||
func (bdc *boltDBCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
|
||||
var res digest.Digest
|
||||
if err := bdc.view(func(tx *bolt.Tx) error {
|
||||
res = bdc.uncompressedDigest(tx, anyDigest)
|
||||
return nil
|
||||
}); err != nil { // Including os.IsNotExist(err)
|
||||
return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
|
||||
// It’s allowed for anyDigest == uncompressed.
|
||||
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
|
||||
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
|
||||
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
|
||||
func (bdc *boltDBCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
|
||||
_ = bdc.update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key := []byte(anyDigest.String())
|
||||
if previousBytes := b.Get(key); previousBytes != nil {
|
||||
previous, err := digest.Parse(string(previousBytes))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if previous != uncompressed {
|
||||
logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
|
||||
}
|
||||
}
|
||||
if err := b.Put(key, []byte(uncompressed.String())); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again.
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
|
||||
}
|
||||
|
||||
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
|
||||
// and can be reused given the opaque location data.
|
||||
func (bdc *boltDBCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
|
||||
_ = bdc.update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b, err = b.CreateBucketIfNotExists([]byte(transport.Name()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
value, err := time.Now().MarshalBinary()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry.
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
|
||||
}
|
||||
|
||||
// appendReplacementCandiates creates candidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
|
||||
func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []candidateWithTime {
|
||||
b := scopeBucket.Bucket([]byte(digest.String()))
|
||||
if b == nil {
|
||||
return candidates
|
||||
}
|
||||
_ = b.ForEach(func(k, v []byte) error {
|
||||
t := time.Time{}
|
||||
if err := t.UnmarshalBinary(v); err != nil {
|
||||
return err
|
||||
}
|
||||
candidates = append(candidates, candidateWithTime{
|
||||
candidate: types.BICReplacementCandidate{
|
||||
Digest: digest,
|
||||
Location: types.BICLocationReference{Opaque: string(k)},
|
||||
},
|
||||
lastSeen: t,
|
||||
})
|
||||
return nil
|
||||
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
|
||||
return candidates
|
||||
}
|
||||
|
||||
// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
|
||||
// within the specified (transport scope) (if they still exist, which is not guaranteed).
|
||||
//
|
||||
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
|
||||
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
|
||||
// uncompressed digest.
|
||||
func (bdc *boltDBCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
|
||||
res := []candidateWithTime{}
|
||||
var uncompressedDigestValue digest.Digest // = ""
|
||||
if err := bdc.view(func(tx *bolt.Tx) error {
|
||||
scopeBucket := tx.Bucket(knownLocationsBucket)
|
||||
if scopeBucket == nil {
|
||||
return nil
|
||||
}
|
||||
scopeBucket = scopeBucket.Bucket([]byte(transport.Name()))
|
||||
if scopeBucket == nil {
|
||||
return nil
|
||||
}
|
||||
scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque))
|
||||
if scopeBucket == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest)
|
||||
if canSubstitute {
|
||||
if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
|
||||
b := tx.Bucket(digestByUncompressedBucket)
|
||||
if b != nil {
|
||||
b = b.Bucket([]byte(uncompressedDigestValue.String()))
|
||||
if b != nil {
|
||||
if err := b.ForEach(func(k, _ []byte) error {
|
||||
d, err := digest.Parse(string(k))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if d != primaryDigest && d != uncompressedDigestValue {
|
||||
res = bdc.appendReplacementCandidates(res, scopeBucket, d)
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if uncompressedDigestValue != primaryDigest {
|
||||
res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}); err != nil { // Including os.IsNotExist(err)
|
||||
return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
|
||||
}
|
||||
|
||||
return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
|
||||
}
|
||||
63 vendor/github.com/containers/image/pkg/blobinfocache/default.go (generated, vendored, new file)
@@ -0,0 +1,63 @@
|
||||
package blobinfocache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/containers/image/types"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const (
|
||||
// blobInfoCacheFilename is the file name used for blob info caches.
|
||||
// If the format changes in an incompatible way, increase the version number.
|
||||
blobInfoCacheFilename = "blob-info-cache-v1.boltdb"
|
||||
// systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfocacheFilename) for root-running processes.
|
||||
systemBlobInfoCacheDir = "/var/lib/containers/cache"
|
||||
)
|
||||
|
||||
// blobInfoCacheDir returns a path to a blob info cache appropripate for sys and euid.
|
||||
// euid is used so that (sudo …) does not write root-owned files into the unprivileged users’ home directory.
|
||||
func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
|
||||
if sys != nil && sys.BlobInfoCacheDir != "" {
|
||||
return sys.BlobInfoCacheDir, nil
|
||||
}
|
||||
|
||||
// FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged
|
||||
// and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe.
|
||||
if euid == 0 {
|
||||
if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
|
||||
return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil
|
||||
}
|
||||
return systemBlobInfoCacheDir, nil
|
||||
}
|
||||
|
||||
// This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts.
|
||||
dataDir := os.Getenv("XDG_DATA_HOME")
|
||||
if dataDir == "" {
|
||||
home := os.Getenv("HOME")
|
||||
if home == "" {
|
||||
return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
|
||||
}
|
||||
dataDir = filepath.Join(home, ".local", "share")
|
||||
}
|
||||
return filepath.Join(dataDir, "containers", "cache"), nil
|
||||
}
|
||||
|
||||
// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
|
||||
func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
|
||||
dir, err := blobInfoCacheDir(sys, os.Geteuid())
|
||||
if err != nil {
|
||||
logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
|
||||
return NewMemoryCache()
|
||||
}
|
||||
path := filepath.Join(dir, blobInfoCacheFilename)
|
||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
||||
logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err)
|
||||
return NewMemoryCache()
|
||||
}
|
||||
|
||||
logrus.Debugf("Using blob info cache at %s", path)
|
||||
return NewBoltDBCache(path)
|
||||
}
|
||||
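
The new `DefaultCache` helper above chooses a persistent BoltDB-backed cache when it can determine and create a suitable directory, and quietly falls back to a memory-only cache otherwise. A minimal, hypothetical caller obtains the cache once and threads it through every blob access (the helper below is illustrative only, not code from this change):

```go
package cachesketch

import (
	"context"
	"fmt"

	"github.com/containers/image/pkg/blobinfocache"
	"github.com/containers/image/types"
)

// fetchConfig is a hypothetical helper showing the intended call pattern:
// one cache per operation, passed to every blob access.
func fetchConfig(ctx context.Context, src types.ImageSource, configInfo types.BlobInfo) error {
	cache := blobinfocache.DefaultCache(nil) // nil SystemContext: use the default locations
	stream, size, err := src.GetBlob(ctx, configInfo, cache)
	if err != nil {
		return err
	}
	defer stream.Close()
	fmt.Printf("config blob is %d bytes\n", size)
	return nil
}
```
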
123 vendor/github.com/containers/image/pkg/blobinfocache/memory.go (generated, vendored, new file)
@@ -0,0 +1,123 @@
|
||||
package blobinfocache
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// locationKey only exists to make lookup in knownLocations easier.
|
||||
type locationKey struct {
|
||||
transport string
|
||||
scope types.BICTransportScope
|
||||
blobDigest digest.Digest
|
||||
}
|
||||
|
||||
// memoryCache implements an in-memory-only BlobInfoCache
|
||||
type memoryCache struct {
|
||||
uncompressedDigests map[digest.Digest]digest.Digest
|
||||
digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{} // stores a set of digests for each uncompressed digest
|
||||
knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
|
||||
}
|
||||
|
||||
// NewMemoryCache returns a BlobInfoCache implementation which is in-memory only.
|
||||
// This is primarily intended for tests, but also used as a fallback if DefaultCache
|
||||
// can’t determine, or set up, the location for a persistent cache.
|
||||
// Manual users of types.{ImageSource,ImageDestination} might also use this instead of a persistent cache.
|
||||
func NewMemoryCache() types.BlobInfoCache {
|
||||
return &memoryCache{
|
||||
uncompressedDigests: map[digest.Digest]digest.Digest{},
|
||||
digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
|
||||
knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
|
||||
}
|
||||
}
|
||||
|
||||
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
|
||||
// May return anyDigest if it is known to be uncompressed.
|
||||
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
|
||||
func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
|
||||
if d, ok := mem.uncompressedDigests[anyDigest]; ok {
|
||||
return d
|
||||
}
|
||||
// Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
|
||||
// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
|
||||
// when we already record a (compressed, uncompressed) pair.
|
||||
if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 {
|
||||
return anyDigest
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
|
||||
// It’s allowed for anyDigest == uncompressed.
|
||||
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
|
||||
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
|
||||
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
|
||||
func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
|
||||
if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
|
||||
logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
|
||||
}
|
||||
mem.uncompressedDigests[anyDigest] = uncompressed
|
||||
|
||||
anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
|
||||
if !ok {
|
||||
anyDigestSet = map[digest.Digest]struct{}{}
|
||||
mem.digestsByUncompressed[uncompressed] = anyDigestSet
|
||||
}
|
||||
anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
|
||||
}
|
||||
|
||||
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
|
||||
// and can be reused given the opaque location data.
|
||||
func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
|
||||
key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
|
||||
locationScope, ok := mem.knownLocations[key]
|
||||
if !ok {
|
||||
locationScope = map[types.BICLocationReference]time.Time{}
|
||||
mem.knownLocations[key] = locationScope
|
||||
}
|
||||
locationScope[location] = time.Now() // Possibly overwriting an older entry.
|
||||
}
|
||||
|
||||
// appendReplacementCandiates creates candidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
|
||||
func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []candidateWithTime {
|
||||
locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
|
||||
for l, t := range locations {
|
||||
candidates = append(candidates, candidateWithTime{
|
||||
candidate: types.BICReplacementCandidate{
|
||||
Digest: digest,
|
||||
Location: l,
|
||||
},
|
||||
lastSeen: t,
|
||||
})
|
||||
}
|
||||
return candidates
|
||||
}
|
||||
|
||||
// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
|
||||
// within the specified (transport scope) (if they still exist, which is not guaranteed).
|
||||
//
|
||||
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
|
||||
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
|
||||
// uncompressed digest.
|
||||
func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
|
||||
res := []candidateWithTime{}
|
||||
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
|
||||
var uncompressedDigest digest.Digest // = ""
|
||||
if canSubstitute {
|
||||
if uncompressedDigest = mem.UncompressedDigest(primaryDigest); uncompressedDigest != "" {
|
||||
otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
|
||||
for d := range otherDigests {
|
||||
if d != primaryDigest && d != uncompressedDigest {
|
||||
res = mem.appendReplacementCandidates(res, transport, scope, d)
|
||||
}
|
||||
}
|
||||
if uncompressedDigest != primaryDigest {
|
||||
res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest)
|
||||
}
|
||||
}
|
||||
}
|
||||
return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
|
||||
}
|
||||
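
For completeness, a small illustrative exercise of the in-memory implementation above (primarily useful in tests); the digest values are made up:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/containers/image/pkg/blobinfocache"
	"github.com/opencontainers/go-digest"
)

func main() {
	cache := blobinfocache.NewMemoryCache()

	compressed := digest.Digest("sha256:" + strings.Repeat("0", 63) + "1")
	uncompressed := digest.Digest("sha256:" + strings.Repeat("0", 63) + "2")

	// Record a (compressed, uncompressed) pair; per the warnings above this
	// must only be done for locally verified data.
	cache.RecordDigestUncompressedPair(compressed, uncompressed)

	fmt.Println(cache.UncompressedDigest(compressed))   // prints the uncompressed digest
	fmt.Println(cache.UncompressedDigest(uncompressed)) // known uncompressed: returns itself
}
```
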
47 vendor/github.com/containers/image/pkg/blobinfocache/none.go (generated, vendored, new file)
@@ -0,0 +1,47 @@
|
||||
package blobinfocache
|
||||
|
||||
import (
|
||||
"github.com/containers/image/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// noCache implements a dummy BlobInfoCache which records no data.
|
||||
type noCache struct {
|
||||
}
|
||||
|
||||
// NoCache implements BlobInfoCache by not recording any data.
|
||||
//
|
||||
// This exists primarily for implementations of configGetter for Manifest.Inspect,
|
||||
// because configs only have one representation.
|
||||
// Any use of BlobInfoCache with blobs should usually use at least a short-lived cache.
|
||||
var NoCache types.BlobInfoCache = noCache{}
|
||||
|
||||
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
|
||||
// May return anyDigest if it is known to be uncompressed.
|
||||
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
|
||||
func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
|
||||
return ""
|
||||
}
|
||||
|
||||
// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
|
||||
// It’s allowed for anyDigest == uncompressed.
|
||||
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
|
||||
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
|
||||
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
|
||||
func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
|
||||
}
|
||||
|
||||
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
|
||||
// and can be reused given the opaque location data.
|
||||
func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
|
||||
}
|
||||
|
||||
// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
// within the specified (transport, scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
|
||||
func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
|
||||
return nil
|
||||
}
|
||||
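As a usage note (not part of the diff): NoCache can be passed wherever a types.BlobInfoCache is required but recording reuse data is pointless, for example when fetching a config blob. A minimal sketch, assuming an already-open types.ImageSource and a hypothetical helper name:

package example

import (
	"context"
	"io/ioutil"

	"github.com/containers/image/pkg/blobinfocache"
	"github.com/containers/image/types"
)

// readConfigBlob is a hypothetical helper: config blobs have only one
// representation, so the do-nothing cache is sufficient here.
func readConfigBlob(ctx context.Context, src types.ImageSource, info types.BlobInfo) ([]byte, error) {
	rc, _, err := src.GetBlob(ctx, info, blobinfocache.NoCache)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}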
108 vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go generated vendored Normal file
@@ -0,0 +1,108 @@
|
||||
package blobinfocache
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/types"
|
||||
"github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates,
|
||||
// and therefore ultimately by types.BlobInfoCache.CandidateLocations.
|
||||
// This is a heuristic/guess, and could well use a different value.
|
||||
const replacementAttempts = 5
|
||||
|
||||
// candidateWithTime is the input to types.BICReplacementCandidate prioritization.
|
||||
type candidateWithTime struct {
|
||||
candidate types.BICReplacementCandidate // The replacement candidate
|
||||
lastSeen time.Time // Time the candidate was last known to exist (either read or written)
|
||||
}
|
||||
|
||||
// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
|
||||
// along with the specially-treated digest values for the implementation of sort.Interface.Less
|
||||
type candidateSortState struct {
|
||||
cs []candidateWithTime // The entries to sort
|
||||
primaryDigest digest.Digest // The digest the user actually asked for
|
||||
uncompressedDigest digest.Digest // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
|
||||
}
|
||||
|
||||
func (css *candidateSortState) Len() int {
|
||||
return len(css.cs)
|
||||
}
|
||||
|
||||
func (css *candidateSortState) Less(i, j int) bool {
|
||||
xi := css.cs[i]
|
||||
xj := css.cs[j]
|
||||
|
||||
// primaryDigest entries come first, more recent first.
|
||||
// uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first.
|
||||
// Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)
|
||||
|
||||
// First, deal with the primaryDigest/uncompressedDigest cases:
|
||||
if xi.candidate.Digest != xj.candidate.Digest {
|
||||
// - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
|
||||
if xi.candidate.Digest == css.primaryDigest {
|
||||
return true
|
||||
}
|
||||
if xj.candidate.Digest == css.primaryDigest {
|
||||
return false
|
||||
}
|
||||
if css.uncompressedDigest != "" {
|
||||
if xi.candidate.Digest == css.uncompressedDigest {
|
||||
return false
|
||||
}
|
||||
if xj.candidate.Digest == css.uncompressedDigest {
|
||||
return true
|
||||
}
|
||||
}
|
||||
} else { // xi.candidate.Digest == xj.candidate.Digest
|
||||
// The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
|
||||
if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) {
|
||||
return xi.lastSeen.After(xj.lastSeen)
|
||||
}
|
||||
}
|
||||
|
||||
// Neither of the digests are primaryDigest/uncompressedDigest:
|
||||
if !xi.lastSeen.Equal(xj.lastSeen) { // Order primarily by time
|
||||
return xi.lastSeen.After(xj.lastSeen)
|
||||
}
|
||||
// Fall back to digest, if timestamps end up _exactly_ the same (how?!)
|
||||
return xi.candidate.Digest < xj.candidate.Digest
|
||||
}
|
||||
|
||||
func (css *candidateSortState) Swap(i, j int) {
|
||||
css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
|
||||
}
|
||||
|
||||
// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
|
||||
// number of entries to limit, only to make testing simpler.
|
||||
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
|
||||
// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
|
||||
// compare equal.
|
||||
sort.Sort(&candidateSortState{
|
||||
cs: cs,
|
||||
primaryDigest: primaryDigest,
|
||||
uncompressedDigest: uncompressedDigest,
|
||||
})
|
||||
|
||||
resLength := len(cs)
|
||||
if resLength > maxCandidates {
|
||||
resLength = maxCandidates
|
||||
}
|
||||
res := make([]types.BICReplacementCandidate, resLength)
|
||||
for i := range res {
|
||||
res[i] = cs[i].candidate
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// destructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
|
||||
// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
|
||||
// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
|
||||
//
|
||||
// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
|
||||
// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
|
||||
func destructivelyPrioritizeReplacementCandidates(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
|
||||
return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
|
||||
}
|
||||
4 vendor/github.com/containers/image/pkg/compression/compression.go generated vendored
@@ -3,10 +3,10 @@ package compression
|
||||
import (
|
||||
"bytes"
|
||||
"compress/bzip2"
|
||||
"compress/gzip"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/klauspost/pgzip"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/ulikunitz/xz"
|
||||
@@ -18,7 +18,7 @@ type DecompressorFunc func(io.Reader) (io.ReadCloser, error)
|
||||
|
||||
// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm.
|
||||
func GzipDecompressor(r io.Reader) (io.ReadCloser, error) {
|
||||
return gzip.NewReader(r)
|
||||
return pgzip.NewReader(r)
|
||||
}
|
||||
|
||||
// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
|
||||
|
||||
9 vendor/github.com/containers/image/pkg/docker/config/config.go generated vendored
@@ -165,9 +165,12 @@ func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) {
|
||||
var auths dockerConfigFile
|
||||
|
||||
raw, err := ioutil.ReadFile(path)
|
||||
if os.IsNotExist(err) {
|
||||
auths.AuthConfigs = map[string]dockerAuthConfig{}
|
||||
return auths, nil
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
auths.AuthConfigs = map[string]dockerAuthConfig{}
|
||||
return auths, nil
|
||||
}
|
||||
return dockerConfigFile{}, err
|
||||
}
|
||||
|
||||
if legacyFormat {
|
||||
|
||||
396 vendor/github.com/containers/image/pkg/sysregistriesv2/system_registries_v2.go generated vendored Normal file
@@ -0,0 +1,396 @@
|
||||
package sysregistriesv2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/BurntSushi/toml"
|
||||
"github.com/containers/image/types"
|
||||
)
|
||||
|
||||
// systemRegistriesConfPath is the path to the system-wide registry
|
||||
// configuration file and is used to add/subtract potential registries for
|
||||
// obtaining images. You can override this at build time with
|
||||
// -ldflags '-X github.com/containers/image/sysregistries.systemRegistriesConfPath=$your_path'
|
||||
var systemRegistriesConfPath = builtinRegistriesConfPath
|
||||
|
||||
// builtinRegistriesConfPath is the path to the registry configuration file.
|
||||
// DO NOT change this, instead see systemRegistriesConfPath above.
|
||||
const builtinRegistriesConfPath = "/etc/containers/registries.conf"
|
||||
|
||||
// Mirror represents a mirror. Mirrors can be used as pull-through caches for
|
||||
// registries.
|
||||
type Mirror struct {
|
||||
// The mirror's URL.
|
||||
URL string `toml:"url"`
|
||||
// If true, certs verification will be skipped and HTTP (non-TLS)
|
||||
// connections will be allowed.
|
||||
Insecure bool `toml:"insecure"`
|
||||
}
|
||||
|
||||
// Registry represents a registry.
|
||||
type Registry struct {
|
||||
// Serializable registry URL.
|
||||
URL string `toml:"url"`
|
||||
// The registry's mirrors.
|
||||
Mirrors []Mirror `toml:"mirror"`
|
||||
// If true, pulling from the registry will be blocked.
|
||||
Blocked bool `toml:"blocked"`
|
||||
// If true, certs verification will be skipped and HTTP (non-TLS)
|
||||
// connections will be allowed.
|
||||
Insecure bool `toml:"insecure"`
|
||||
// If true, the registry can be used when pulling an unqualified image.
|
||||
Search bool `toml:"unqualified-search"`
|
||||
// Prefix is used for matching images, and to translate one namespace to
|
||||
// another. If `Prefix="example.com/bar"`, `URL="example.com/foo/bar"`
|
||||
// and we pull from "example.com/bar/myimage:latest", the image will
|
||||
// effectively be pulled from "example.com/foo/bar/myimage:latest".
|
||||
// If no Prefix is specified, it defaults to the specified URL.
|
||||
Prefix string `toml:"prefix"`
|
||||
}
|
||||
|
||||
// backwards compatibility with sysregistries v1
|
||||
type v1TOMLregistries struct {
|
||||
Registries []string `toml:"registries"`
|
||||
}
|
||||
|
||||
// tomlConfig is the data type used to unmarshal the toml config.
|
||||
type tomlConfig struct {
|
||||
Registries []Registry `toml:"registry"`
|
||||
// backwards compatibility with sysregistries v1
|
||||
V1Registries struct {
|
||||
Search v1TOMLregistries `toml:"search"`
|
||||
Insecure v1TOMLregistries `toml:"insecure"`
|
||||
Block v1TOMLregistries `toml:"block"`
|
||||
} `toml:"registries"`
|
||||
}
|
||||
|
||||
// InvalidRegistries represents an invalid registry configuration. An example
|
||||
// is when "registry.com" is defined multiple times in the configuration but
|
||||
// with conflicting security settings.
|
||||
type InvalidRegistries struct {
|
||||
s string
|
||||
}
|
||||
|
||||
// Error returns the error string.
|
||||
func (e *InvalidRegistries) Error() string {
|
||||
return e.s
|
||||
}
|
||||
|
||||
// parseURL parses the input string, performs some sanity checks and returns
|
||||
// the sanitized input string. An error is returned if the input string is
|
||||
// empty or if it contains an "http{s,}://" prefix.
|
||||
func parseURL(input string) (string, error) {
|
||||
trimmed := strings.TrimRight(input, "/")
|
||||
|
||||
if trimmed == "" {
|
||||
return "", &InvalidRegistries{s: "invalid URL: cannot be empty"}
|
||||
}
|
||||
|
||||
if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") {
|
||||
msg := fmt.Sprintf("invalid URL '%s': URI schemes are not supported", input)
|
||||
return "", &InvalidRegistries{s: msg}
|
||||
}
|
||||
|
||||
return trimmed, nil
|
||||
}
|
||||
|
||||
// getV1Registries transforms v1 registries in the config into an array of v2
|
||||
// registries of type Registry.
|
||||
func getV1Registries(config *tomlConfig) ([]Registry, error) {
|
||||
regMap := make(map[string]*Registry)
|
||||
// We must preserve the order of config.V1Registries.Search.Registries at least. The order of the
|
||||
// other registries is not really important, but make it deterministic (the same for the same config file)
|
||||
// to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations.
|
||||
registryOrder := []string{}
|
||||
|
||||
getRegistry := func(url string) (*Registry, error) { // Note: _pointer_ to a long-lived object
|
||||
var err error
|
||||
url, err = parseURL(url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reg, exists := regMap[url]
|
||||
if !exists {
|
||||
reg = &Registry{
|
||||
URL: url,
|
||||
Mirrors: []Mirror{},
|
||||
Prefix: url,
|
||||
}
|
||||
regMap[url] = reg
|
||||
registryOrder = append(registryOrder, url)
|
||||
}
|
||||
return reg, nil
|
||||
}
|
||||
|
||||
// Note: config.V1Registries.Search needs to be processed first to ensure registryOrder is populated in the right order
|
||||
// if one of the search registries is also in one of the other lists.
|
||||
for _, search := range config.V1Registries.Search.Registries {
|
||||
reg, err := getRegistry(search)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reg.Search = true
|
||||
}
|
||||
for _, blocked := range config.V1Registries.Block.Registries {
|
||||
reg, err := getRegistry(blocked)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reg.Blocked = true
|
||||
}
|
||||
for _, insecure := range config.V1Registries.Insecure.Registries {
|
||||
reg, err := getRegistry(insecure)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reg.Insecure = true
|
||||
}
|
||||
|
||||
registries := []Registry{}
|
||||
for _, url := range registryOrder {
|
||||
reg := regMap[url]
|
||||
registries = append(registries, *reg)
|
||||
}
|
||||
return registries, nil
|
||||
}
|
||||
|
||||
// postProcessRegistries checks the consistency of all registries (e.g., set
|
||||
// the Prefix to URL if not set) and applies conflict checks. It returns an
|
||||
// array of cleaned registries and error in case of conflicts.
|
||||
func postProcessRegistries(regs []Registry) ([]Registry, error) {
|
||||
var registries []Registry
|
||||
regMap := make(map[string][]Registry)
|
||||
|
||||
for _, reg := range regs {
|
||||
var err error
|
||||
|
||||
// make sure URL and Prefix are valid
|
||||
reg.URL, err = parseURL(reg.URL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if reg.Prefix == "" {
|
||||
reg.Prefix = reg.URL
|
||||
} else {
|
||||
reg.Prefix, err = parseURL(reg.Prefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// make sure mirrors are valid
|
||||
for _, mir := range reg.Mirrors {
|
||||
mir.URL, err = parseURL(mir.URL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
registries = append(registries, reg)
|
||||
regMap[reg.URL] = append(regMap[reg.URL], reg)
|
||||
}
|
||||
|
||||
// Given a registry can be mentioned multiple times (e.g., to have
|
||||
// multiple prefixes backed by different mirrors), we need to make sure
|
||||
// there are no conflicts among them.
|
||||
//
|
||||
// Note: we need to iterate over the registries array to ensure a
|
||||
// deterministic behavior which is not guaranteed by maps.
|
||||
for _, reg := range registries {
|
||||
others, _ := regMap[reg.URL]
|
||||
for _, other := range others {
|
||||
if reg.Insecure != other.Insecure {
|
||||
msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.URL)
|
||||
|
||||
return nil, &InvalidRegistries{s: msg}
|
||||
}
|
||||
if reg.Blocked != other.Blocked {
|
||||
msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.URL)
|
||||
return nil, &InvalidRegistries{s: msg}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return registries, nil
|
||||
}
|
||||
|
||||
// getConfigPath returns the system-registries config path if specified.
|
||||
// Otherwise, systemRegistriesConfPath is returned.
|
||||
func getConfigPath(ctx *types.SystemContext) string {
|
||||
confPath := systemRegistriesConfPath
|
||||
if ctx != nil {
|
||||
if ctx.SystemRegistriesConfPath != "" {
|
||||
confPath = ctx.SystemRegistriesConfPath
|
||||
} else if ctx.RootForImplicitAbsolutePaths != "" {
|
||||
confPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
|
||||
}
|
||||
}
|
||||
return confPath
|
||||
}
|
||||
|
||||
// configMutex is used to synchronize concurrent accesses to configCache.
|
||||
var configMutex = sync.Mutex{}
|
||||
|
||||
// configCache caches already loaded configs with config paths as keys and is
|
||||
// used to avoid redundantly parsing configs. Concurrent accesses to the cache
|
||||
// are synchronized via configMutex.
|
||||
var configCache = make(map[string][]Registry)
|
||||
|
||||
// InvalidateCache invalidates the registry cache. This function is meant to be
|
||||
// used for long-running processes that need to reload potential changes made to
|
||||
// the cached registry config files.
|
||||
func InvalidateCache() {
|
||||
configMutex.Lock()
|
||||
defer configMutex.Unlock()
|
||||
configCache = make(map[string][]Registry)
|
||||
}
|
||||
|
||||
// GetRegistries loads and returns the registries specified in the config.
|
||||
// Note the parsed content of registry config files is cached. For reloading,
|
||||
// use `InvalidateCache` and re-call `GetRegistries`.
|
||||
func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
|
||||
configPath := getConfigPath(ctx)
|
||||
|
||||
configMutex.Lock()
|
||||
defer configMutex.Unlock()
|
||||
// if the config has already been loaded, return the cached registries
|
||||
if registries, inCache := configCache[configPath]; inCache {
|
||||
return registries, nil
|
||||
}
|
||||
|
||||
// load the config
|
||||
config, err := loadRegistryConf(configPath)
|
||||
if err != nil {
|
||||
// Return an empty []Registry if we use the default config,
|
||||
// which implies that the config path of the SystemContext
|
||||
// isn't set. Note: if ctx.SystemRegistriesConfPath points to
|
||||
// the default config, we will still return an error.
|
||||
if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") {
|
||||
return []Registry{}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
registries := config.Registries
|
||||
|
||||
// backwards compatibility for v1 configs
|
||||
v1Registries, err := getV1Registries(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(v1Registries) > 0 {
|
||||
if len(registries) > 0 {
|
||||
return nil, &InvalidRegistries{s: "mixing sysregistry v1/v2 is not supported"}
|
||||
}
|
||||
registries = v1Registries
|
||||
}
|
||||
|
||||
registries, err = postProcessRegistries(registries)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// populate the cache
|
||||
configCache[configPath] = registries
|
||||
|
||||
return registries, err
|
||||
}
|
||||
|
||||
// FindUnqualifiedSearchRegistries returns all registries that are configured
|
||||
// for unqualified image search (i.e., with Registry.Search == true).
|
||||
func FindUnqualifiedSearchRegistries(ctx *types.SystemContext) ([]Registry, error) {
|
||||
registries, err := GetRegistries(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
unqualified := []Registry{}
|
||||
for _, reg := range registries {
|
||||
if reg.Search {
|
||||
unqualified = append(unqualified, reg)
|
||||
}
|
||||
}
|
||||
return unqualified, nil
|
||||
}
|
||||
|
||||
// refMatchesPrefix returns true iff ref,
|
||||
// which is a registry, repository namespace, repository or image reference (as formatted by
|
||||
// reference.Domain(), reference.Named.Name() or reference.Reference.String()
|
||||
// — note that this requires the name to start with an explicit hostname!),
|
||||
// matches a Registry.Prefix value.
|
||||
// (This is split from the caller primarily to make testing easier.)
|
||||
func refMatchesPrefix(ref, prefix string) bool {
|
||||
switch {
|
||||
case len(ref) < len(prefix):
|
||||
return false
|
||||
case len(ref) == len(prefix):
|
||||
return ref == prefix
|
||||
case len(ref) > len(prefix):
|
||||
if !strings.HasPrefix(ref, prefix) {
|
||||
return false
|
||||
}
|
||||
c := ref[len(prefix)]
|
||||
// This allows "example.com:5000" to match "example.com",
|
||||
// which is unintended; that will get fixed eventually, DON'T RELY
|
||||
// ON THE CURRENT BEHAVIOR.
|
||||
return c == ':' || c == '/' || c == '@'
|
||||
default:
|
||||
panic("Internal error: impossible comparison outcome")
|
||||
}
|
||||
}
|
||||
|
||||
// FindRegistry returns the Registry with the longest prefix for ref,
|
||||
// which is a registry, repository namespace, repository or image reference (as formatted by
|
||||
// reference.Domain(), reference.Named.Name() or reference.Reference.String()
|
||||
// — note that this requires the name to start with an explicit hostname!).
|
||||
// If no Registry prefixes the image, nil is returned.
|
||||
func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
|
||||
registries, err := GetRegistries(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reg := Registry{}
|
||||
prefixLen := 0
|
||||
for _, r := range registries {
|
||||
if refMatchesPrefix(ref, r.Prefix) {
|
||||
length := len(r.Prefix)
|
||||
if length > prefixLen {
|
||||
reg = r
|
||||
prefixLen = length
|
||||
}
|
||||
}
|
||||
}
|
||||
if prefixLen != 0 {
|
||||
return ®, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Reads the global registry file from the filesystem. Returns a byte array.
|
||||
func readRegistryConf(configPath string) ([]byte, error) {
|
||||
configBytes, err := ioutil.ReadFile(configPath)
|
||||
return configBytes, err
|
||||
}
|
||||
|
||||
// Used in unittests to parse custom configs without a types.SystemContext.
|
||||
var readConf = readRegistryConf
|
||||
|
||||
// Loads the registry configuration file from the filesystem and then unmarshals
|
||||
// it. Returns the unmarshalled object.
|
||||
func loadRegistryConf(configPath string) (*tomlConfig, error) {
|
||||
config := &tomlConfig{}
|
||||
|
||||
configBytes, err := readConf(configPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = toml.Unmarshal(configBytes, &config)
|
||||
return config, err
|
||||
}
|
||||
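An illustrative sketch (not part of the diff) of the lookup API this file adds. The config path below is hypothetical; the built-in default remains /etc/containers/registries.conf.

package main

import (
	"fmt"

	"github.com/containers/image/pkg/sysregistriesv2"
	"github.com/containers/image/types"
)

func main() {
	// Hypothetical override of the registries.conf location.
	sys := &types.SystemContext{SystemRegistriesConfPath: "/tmp/registries.conf"}

	// FindRegistry performs a longest-prefix match of the reference against Registry.Prefix.
	reg, err := sysregistriesv2.FindRegistry(sys, "example.com/foo/myimage:latest")
	if err != nil {
		panic(err)
	}
	if reg != nil {
		fmt.Printf("matched %q (blocked=%v, insecure=%v, mirrors=%d)\n",
			reg.URL, reg.Blocked, reg.Insecure, len(reg.Mirrors))
	}
}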
20 vendor/github.com/containers/image/pkg/tlsclientconfig/tlsclientconfig.go generated vendored
@@ -34,16 +34,26 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
|
||||
for _, f := range fs {
|
||||
fullPath := filepath.Join(dir, f.Name())
|
||||
if strings.HasSuffix(f.Name(), ".crt") {
|
||||
systemPool, err := tlsconfig.SystemCertPool()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to get system cert pool")
|
||||
}
|
||||
tlsc.RootCAs = systemPool
|
||||
logrus.Debugf(" crt: %s", fullPath)
|
||||
data, err := ioutil.ReadFile(fullPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
// Dangling symbolic link?
|
||||
// Race with someone who deleted the
|
||||
// file after we read the directory's
|
||||
// list of contents?
|
||||
logrus.Warnf("error reading certificate %q: %v", fullPath, err)
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
if tlsc.RootCAs == nil {
|
||||
systemPool, err := tlsconfig.SystemCertPool()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to get system cert pool")
|
||||
}
|
||||
tlsc.RootCAs = systemPool
|
||||
}
|
||||
tlsc.RootCAs.AppendCertsFromPEM(data)
|
||||
}
|
||||
if strings.HasSuffix(f.Name(), ".cert") {
|
||||
|
||||
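For context (not part of the diff), SetupCertificates is typically pointed at a per-registry certificate directory; a minimal sketch with a hypothetical path:

package main

import (
	"crypto/tls"

	"github.com/containers/image/pkg/tlsclientconfig"
)

func main() {
	tlsc := &tls.Config{}
	// Hypothetical certs.d-style directory holding *.crt CA certificates and *.cert client certificates.
	if err := tlsclientconfig.SetupCertificates("/etc/containers/certs.d/registry.example.com", tlsc); err != nil {
		panic(err)
	}
	// With the change above, a missing or dangling file only produces a warning, and the
	// system cert pool is loaded once a *.crt file is actually read.
	_ = tlsc.RootCAs
}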
200 vendor/github.com/containers/image/storage/storage_image.go generated vendored
@@ -11,11 +11,13 @@ import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/containers/image/image"
|
||||
"github.com/containers/image/internal/tmpdir"
|
||||
"github.com/containers/image/manifest"
|
||||
"github.com/containers/image/pkg/blobinfocache"
|
||||
"github.com/containers/image/types"
|
||||
"github.com/containers/storage"
|
||||
"github.com/containers/storage/pkg/archive"
|
||||
@@ -46,6 +48,7 @@ type storageImageSource struct {
|
||||
image *storage.Image
|
||||
layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
|
||||
cachedManifest []byte // A cached copy of the manifest, if already known, or nil
|
||||
getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
|
||||
SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
|
||||
}
|
||||
|
||||
@@ -55,6 +58,7 @@ type storageImageDestination struct {
|
||||
nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs
|
||||
manifest []byte // Manifest contents, temporary
|
||||
signatures []byte // Signature contents, temporary
|
||||
putBlobMutex sync.Mutex // Mutex to sync state for parallel PutBlob executions
|
||||
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
|
||||
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
|
||||
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
|
||||
@@ -90,17 +94,24 @@ func newImageSource(imageRef storageReference) (*storageImageSource, error) {
|
||||
}
|
||||
|
||||
// Reference returns the image reference that we used to find this image.
|
||||
func (s storageImageSource) Reference() types.ImageReference {
|
||||
func (s *storageImageSource) Reference() types.ImageReference {
|
||||
return s.imageRef
|
||||
}
|
||||
|
||||
// Close cleans up any resources we tied up while reading the image.
|
||||
func (s storageImageSource) Close() error {
|
||||
func (s *storageImageSource) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given.
|
||||
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {
|
||||
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
|
||||
func (s *storageImageSource) HasThreadSafeGetBlob() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
|
||||
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
|
||||
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
|
||||
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
|
||||
if info.Digest == image.GzippedEmptyLayerDigest {
|
||||
return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
|
||||
}
|
||||
@@ -134,8 +145,10 @@ func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadC
|
||||
// Step through the list of matching layers. Tests may want to verify that if we have multiple layers
|
||||
// which claim to have the same contents, that we actually do have multiple layers, otherwise we could
|
||||
// just go ahead and use the first one every time.
|
||||
s.getBlobMutex.Lock()
|
||||
i := s.layerPosition[info.Digest]
|
||||
s.layerPosition[info.Digest] = i + 1
|
||||
s.getBlobMutex.Unlock()
|
||||
if len(layers) > 0 {
|
||||
layer = layers[i%len(layers)]
|
||||
}
|
||||
@@ -297,7 +310,7 @@ func newImageDestination(imageRef storageReference) (*storageImageDestination, e
|
||||
|
||||
// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
|
||||
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
|
||||
func (s storageImageDestination) Reference() types.ImageReference {
|
||||
func (s *storageImageDestination) Reference() types.ImageReference {
|
||||
return s.imageRef
|
||||
}
|
||||
|
||||
@@ -306,16 +319,33 @@ func (s *storageImageDestination) Close() error {
|
||||
return os.RemoveAll(s.directory)
|
||||
}
|
||||
|
||||
func (s storageImageDestination) DesiredLayerCompression() types.LayerCompression {
|
||||
func (s *storageImageDestination) DesiredLayerCompression() types.LayerCompression {
|
||||
// We ultimately have to decompress layers to populate trees on disk,
|
||||
// so callers shouldn't bother compressing them before handing them to
|
||||
// us, if they're not already compressed.
|
||||
return types.PreserveOriginal
|
||||
}
|
||||
|
||||
// PutBlob stores a layer or data blob in our temporary directory, checking that any information
|
||||
// in the blobinfo matches the incoming data.
|
||||
func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
|
||||
func (s *storageImageDestination) computeNextBlobCacheFile() string {
|
||||
return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
|
||||
}
|
||||
|
||||
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
|
||||
func (s *storageImageDestination) HasThreadSafePutBlob() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// PutBlob writes contents of stream and returns data representing the result.
|
||||
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
|
||||
// inputInfo.Size is the expected length of stream, if known.
|
||||
// inputInfo.MediaType describes the blob format, if known.
|
||||
// May update cache.
|
||||
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
|
||||
// to any other readers for download using the supplied digest.
|
||||
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
|
||||
func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
|
||||
// Stores a layer or data blob in our temporary directory, checking that any information
|
||||
// in the blobinfo matches the incoming data.
|
||||
errorBlobInfo := types.BlobInfo{
|
||||
Digest: "",
|
||||
Size: -1,
|
||||
@@ -328,7 +358,7 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
|
||||
}
|
||||
}
|
||||
diffID := digest.Canonical.Digester()
|
||||
filename := filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
|
||||
filename := s.computeNextBlobCacheFile()
|
||||
file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename)
|
||||
@@ -355,9 +385,11 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
|
||||
return errorBlobInfo, ErrBlobSizeMismatch
|
||||
}
|
||||
// Record information about the blob.
|
||||
s.putBlobMutex.Lock()
|
||||
s.blobDiffIDs[hasher.Digest()] = diffID.Digest()
|
||||
s.fileSizes[hasher.Digest()] = counter.Count
|
||||
s.filenames[hasher.Digest()] = filename
|
||||
s.putBlobMutex.Unlock()
|
||||
blobDigest := blobinfo.Digest
|
||||
if blobDigest.Validate() != nil {
|
||||
blobDigest = hasher.Digest()
|
||||
@@ -366,6 +398,8 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
|
||||
if blobSize < 0 {
|
||||
blobSize = counter.Count
|
||||
}
|
||||
// This is safe because we have just computed both values ourselves.
|
||||
cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
|
||||
return types.BlobInfo{
|
||||
Digest: blobDigest,
|
||||
Size: blobSize,
|
||||
@@ -373,59 +407,85 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be
|
||||
// reapplied using ReapplyBlob.
|
||||
//
|
||||
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
|
||||
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
|
||||
// it returns a non-nil error only on an unexpected failure.
|
||||
func (s *storageImageDestination) HasBlob(ctx context.Context, blobinfo types.BlobInfo) (bool, int64, error) {
|
||||
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
|
||||
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
|
||||
// info.Digest must not be empty.
|
||||
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
|
||||
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
|
||||
// May use and/or update cache.
|
||||
func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
|
||||
// lock the entire method as it executes fairly quickly
|
||||
s.putBlobMutex.Lock()
|
||||
defer s.putBlobMutex.Unlock()
|
||||
if blobinfo.Digest == "" {
|
||||
return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`)
|
||||
return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
|
||||
}
|
||||
if err := blobinfo.Digest.Validate(); err != nil {
|
||||
return false, -1, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
|
||||
return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
|
||||
}
|
||||
|
||||
// Check if we've already cached it in a file.
|
||||
if size, ok := s.fileSizes[blobinfo.Digest]; ok {
|
||||
return true, size, nil
|
||||
return true, types.BlobInfo{
|
||||
Digest: blobinfo.Digest,
|
||||
Size: size,
|
||||
MediaType: blobinfo.MediaType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Check if we have a wasn't-compressed layer in storage that's based on that blob.
|
||||
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
|
||||
if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
|
||||
return false, -1, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
|
||||
return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
|
||||
}
|
||||
if len(layers) > 0 {
|
||||
// Save this for completeness.
|
||||
s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
|
||||
return true, layers[0].UncompressedSize, nil
|
||||
return true, types.BlobInfo{
|
||||
Digest: blobinfo.Digest,
|
||||
Size: layers[0].UncompressedSize,
|
||||
MediaType: blobinfo.MediaType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Check if we have a was-compressed layer in storage that's based on that blob.
|
||||
layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
|
||||
if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
|
||||
return false, -1, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
|
||||
return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
|
||||
}
|
||||
if len(layers) > 0 {
|
||||
// Record the uncompressed value so that we can use it to calculate layer IDs.
|
||||
s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
|
||||
return true, layers[0].CompressedSize, nil
|
||||
return true, types.BlobInfo{
|
||||
Digest: blobinfo.Digest,
|
||||
Size: layers[0].CompressedSize,
|
||||
MediaType: blobinfo.MediaType,
|
||||
}, nil
|
||||
}
|
||||
// Nope, we don't have it.
|
||||
return false, -1, nil
|
||||
}
|
||||
|
||||
// ReapplyBlob is now a no-op, assuming HasBlob() says we already have it, since Commit() can just apply the
|
||||
// same one when it walks the list in the manifest.
|
||||
func (s *storageImageDestination) ReapplyBlob(ctx context.Context, blobinfo types.BlobInfo) (types.BlobInfo, error) {
|
||||
present, size, err := s.HasBlob(ctx, blobinfo)
|
||||
if !present {
|
||||
return types.BlobInfo{}, errors.Errorf("error reapplying blob %+v: blob was not previously applied", blobinfo)
|
||||
// Does the blob correspond to a known DiffID which we already have available?
|
||||
// Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
|
||||
// uncompressed layer, and that can happen only if canSubstitute.
|
||||
if canSubstitute {
|
||||
if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
|
||||
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
|
||||
if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
|
||||
return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, uncompressedDigest)
|
||||
}
|
||||
if len(layers) > 0 {
|
||||
s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
|
||||
return true, types.BlobInfo{
|
||||
Digest: uncompressedDigest,
|
||||
Size: layers[0].UncompressedSize,
|
||||
MediaType: blobinfo.MediaType,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, errors.Wrapf(err, "error reapplying blob %+v", blobinfo)
|
||||
}
|
||||
blobinfo.Size = size
|
||||
return blobinfo, nil
|
||||
|
||||
// Nope, we don't have it.
|
||||
return false, types.BlobInfo{}, nil
|
||||
}
|
||||
|
||||
// computeID computes a recommended image ID based on information we have so far. If
|
||||
@@ -504,15 +564,18 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
|
||||
continue
|
||||
}
|
||||
|
||||
var diff io.ReadCloser
|
||||
// Check if there's already a layer with the ID that we'd give to the result of applying
|
||||
// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
|
||||
diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
|
||||
if !haveDiffID {
|
||||
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
|
||||
// or to even check if we had it.
|
||||
// Use blobinfocache.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
|
||||
// that relies on using a blob digest that has never been seen by the store had better call
|
||||
// TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
|
||||
// so far we are going to accommodate that (if we should be doing that at all).
|
||||
logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
|
||||
has, _, err := s.HasBlob(ctx, blob.BlobInfo)
|
||||
has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, blobinfocache.NoCache, false)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
|
||||
}
|
||||
@@ -533,19 +596,11 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
|
||||
lastLayer = layer.ID
|
||||
continue
|
||||
}
|
||||
// Check if we cached a file with that blobsum. If we didn't already have a layer with
|
||||
// the blob's contents, we should have gotten a copy.
|
||||
if filename, ok := s.filenames[blob.Digest]; ok {
|
||||
// Use the file's contents to initialize the layer.
|
||||
file, err2 := os.Open(filename)
|
||||
if err2 != nil {
|
||||
return errors.Wrapf(err2, "error opening file %q", filename)
|
||||
}
|
||||
defer file.Close()
|
||||
diff = file
|
||||
}
|
||||
if diff == nil {
|
||||
// Try to find a layer with contents matching that blobsum.
|
||||
// Check if we previously cached a file with that blob's contents. If we didn't,
|
||||
// then we need to read the desired contents from a layer.
|
||||
filename, ok := s.filenames[blob.Digest]
|
||||
if !ok {
|
||||
// Try to find the layer with contents matching that blobsum.
|
||||
layer := ""
|
||||
layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(blob.Digest)
|
||||
if err2 == nil && len(layers) > 0 {
|
||||
@@ -559,25 +614,48 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
|
||||
if layer == "" {
|
||||
return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest)
|
||||
}
|
||||
// Use the layer's contents to initialize the new layer.
|
||||
// Read the layer's contents.
|
||||
noCompression := archive.Uncompressed
|
||||
diffOptions := &storage.DiffOptions{
|
||||
Compression: &noCompression,
|
||||
}
|
||||
diff, err2 = s.imageRef.transport.store.Diff("", layer, diffOptions)
|
||||
diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
|
||||
if err2 != nil {
|
||||
return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest)
|
||||
}
|
||||
defer diff.Close()
|
||||
// Copy the layer diff to a file. Diff() takes a lock that it holds
|
||||
// until the ReadCloser that it returns is closed, and PutLayer() wants
|
||||
// the same lock, so the diff can't just be directly streamed from one
|
||||
// to the other.
|
||||
filename = s.computeNextBlobCacheFile()
|
||||
file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
|
||||
if err != nil {
|
||||
diff.Close()
|
||||
return errors.Wrapf(err, "error creating temporary file %q", filename)
|
||||
}
|
||||
// Copy the data to the file.
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using
|
||||
// ctx.Done().
|
||||
_, err = io.Copy(file, diff)
|
||||
diff.Close()
|
||||
file.Close()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error storing blob to file %q", filename)
|
||||
}
|
||||
// Make sure that we can find this file later, should we need the layer's
|
||||
// contents again.
|
||||
s.filenames[blob.Digest] = filename
|
||||
}
|
||||
if diff == nil {
|
||||
// This shouldn't have happened.
|
||||
return errors.Errorf("error applying blob %q: content not found", blob.Digest)
|
||||
// Read the cached blob and use it as a diff.
|
||||
file, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "error opening file %q", filename)
|
||||
}
|
||||
defer file.Close()
|
||||
// Build the new layer using the diff, regardless of where it came from.
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
|
||||
layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, diff)
|
||||
if err != nil {
|
||||
layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, nil, file)
|
||||
if err != nil && errors.Cause(err) != storage.ErrDuplicateID {
|
||||
return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest)
|
||||
}
|
||||
lastLayer = layer.ID
|
||||
|
||||
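A brief sketch (not part of the diff) of how a copy pipeline would call the new TryReusingBlob/PutBlob pair, assuming dest is a types.ImageDestination, cache a types.BlobInfoCache, and the helper name is hypothetical:

package example

import (
	"context"
	"io"

	"github.com/containers/image/types"
)

// copyBlob is a hypothetical helper: try a cheap reuse first, and fall back to
// uploading the stream only when the destination cannot reuse the blob.
func copyBlob(ctx context.Context, dest types.ImageDestination, stream io.Reader, info types.BlobInfo, cache types.BlobInfoCache) (types.BlobInfo, error) {
	reused, reusedInfo, err := dest.TryReusingBlob(ctx, info, cache, true)
	if err != nil {
		return types.BlobInfo{}, err
	}
	if reused {
		return reusedInfo, nil
	}
	return dest.PutBlob(ctx, stream, info, cache, false)
}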
2 vendor/github.com/containers/image/storage/storage_transport.go generated vendored
@@ -107,7 +107,7 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap {
|
||||
// relative to the given store, and returns it in a reference object.
|
||||
func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
|
||||
if ref == "" {
|
||||
return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference")
|
||||
return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference", ref)
|
||||
}
|
||||
if ref[0] == '[' {
|
||||
// Ignore the store specifier.
|
||||
|
||||
15 vendor/github.com/containers/image/tarball/tarball_src.go generated vendored
@@ -2,7 +2,6 @@ package tarball
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
@@ -14,7 +13,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/containers/image/types"
|
||||
|
||||
"github.com/klauspost/pgzip"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
imgspecs "github.com/opencontainers/image-spec/specs-go"
|
||||
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
@@ -77,7 +76,7 @@ func (r *tarballReference) NewImageSource(ctx context.Context, sys *types.System
|
||||
|
||||
// Set up to digest the file after we maybe decompress it.
|
||||
diffIDdigester := digest.Canonical.Digester()
|
||||
uncompressed, err := gzip.NewReader(reader)
|
||||
uncompressed, err := pgzip.NewReader(reader)
|
||||
if err == nil {
|
||||
// It is compressed, so the diffID is the digest of the uncompressed version
|
||||
reader = io.TeeReader(uncompressed, diffIDdigester.Hash())
|
||||
@@ -207,7 +206,15 @@ func (is *tarballImageSource) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo) (io.ReadCloser, int64, error) {
|
||||
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
|
||||
func (is *tarballImageSource) HasThreadSafeGetBlob() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
|
||||
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
|
||||
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
|
||||
func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
|
||||
// We should only be asked about things in the manifest. Maybe the configuration blob.
|
||||
if blobinfo.Digest == is.configID {
|
||||
return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
|
||||
|
||||
1 vendor/github.com/containers/image/transports/alltransports/alltransports.go generated vendored
@@ -9,7 +9,6 @@ import (
|
||||
_ "github.com/containers/image/directory"
|
||||
_ "github.com/containers/image/docker"
|
||||
_ "github.com/containers/image/docker/archive"
|
||||
_ "github.com/containers/image/docker/daemon"
|
||||
_ "github.com/containers/image/oci/archive"
|
||||
_ "github.com/containers/image/oci/layout"
|
||||
_ "github.com/containers/image/openshift"
|
||||
|
||||
8 vendor/github.com/containers/image/transports/alltransports/docker_daemon.go generated vendored Normal file
@@ -0,0 +1,8 @@
|
||||
// +build !containers_image_docker_daemon_stub
|
||||
|
||||
package alltransports
|
||||
|
||||
import (
|
||||
// Register the docker-daemon transport
|
||||
_ "github.com/containers/image/docker/daemon"
|
||||
)
|
||||
9 vendor/github.com/containers/image/transports/alltransports/docker_daemon_stub.go generated vendored Normal file
@@ -0,0 +1,9 @@
|
||||
// +build containers_image_docker_daemon_stub
|
||||
|
||||
package alltransports
|
||||
|
||||
import "github.com/containers/image/transports"
|
||||
|
||||
func init() {
|
||||
transports.Register(transports.NewStubTransport("docker-daemon"))
|
||||
}
|
||||
132 vendor/github.com/containers/image/types/types.go generated vendored
@@ -100,6 +100,82 @@ type BlobInfo struct {
|
||||
MediaType string
|
||||
}
|
||||
|
||||
// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
|
||||
// BlobInfoCache.RecordKnownLocation / BlobInfoCache.CandidateLocations record data about blobs keyed by (scope, digest).
|
||||
// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
|
||||
//
|
||||
// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
|
||||
// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
|
||||
// at least by not failing hard when encountering unknown data.
|
||||
type BICTransportScope struct {
|
||||
Opaque string
|
||||
}
|
||||
|
||||
// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope.
|
||||
// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob
|
||||
// can look it up using BlobInfoCache.CandidateLocations.
|
||||
//
|
||||
// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
|
||||
// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
|
||||
// at least by not failing hard when encountering unknown data.
|
||||
type BICLocationReference struct {
|
||||
Opaque string
|
||||
}
|
||||
|
||||
// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations.
|
||||
type BICReplacementCandidate struct {
|
||||
Digest digest.Digest
|
||||
Location BICLocationReference
|
||||
}
|
||||
|
||||
// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
|
||||
//
|
||||
// It records two kinds of data:
|
||||
// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
|
||||
// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
|
||||
// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
|
||||
//
|
||||
// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
|
||||
// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
|
||||
//
|
||||
// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
|
||||
// compress/decompress blobs for their own purposes.
|
||||
//
|
||||
// - Known blob locations, managed by individual transports:
|
||||
// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
|
||||
// recording transport-specific information that allows the transport to reuse the blob in the future;
|
||||
// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
|
||||
//
|
||||
// Each transport defines its own “scopes” within which blob reuse is possible (e.g. in the docker/distribution case, blobs
|
||||
// can be directly reused within a registry, or mounted across registries within a registry server.)
|
||||
//
|
||||
// None of the methods return an error indication: errors when reading from, or writing to, the cache should not be fatal;
// users of the cache should just fall back to copying the blobs the usual way.
|
||||
type BlobInfoCache interface {
|
||||
// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
|
||||
// May return anyDigest if it is known to be uncompressed.
|
||||
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
|
||||
UncompressedDigest(anyDigest digest.Digest) digest.Digest
|
||||
// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
|
||||
// It’s allowed for anyDigest == uncompressed.
|
||||
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
|
||||
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
|
||||
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
|
||||
RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)
|
||||
|
||||
// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
|
||||
// and can be reused given the opaque location data.
|
||||
RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference)
|
||||
// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
// within the specified (transport, scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
|
||||
CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate
|
||||
}
|
||||
|
||||
// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list).
// This is primarily useful for copying images around; for examining their properties, Image (below)
// is usually more useful.
@@ -120,7 +196,10 @@ type ImageSource interface {
GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error)
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
GetBlob(context.Context, BlobInfo) (io.ReadCloser, int64, error)
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error)
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
HasThreadSafeGetBlob() bool
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
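A caller-side sketch of the new three-argument GetBlob, assuming the vendored containers/image types package from this diff; copyOneBlob and its parameters are placeholders supplied by the caller, not part of the library.

```go
package copyexample

import (
	"context"
	"io"

	"github.com/containers/image/types"
)

// copyOneBlob streams a single blob from an ImageSource, letting the source
// consult and update the shared BlobInfoCache. The reported size may be -1
// when the transport does not know it up front, so we rely on io.Copy.
func copyOneBlob(ctx context.Context, src types.ImageSource, cache types.BlobInfoCache,
	info types.BlobInfo, dst io.Writer) (int64, error) {
	stream, _, err := src.GetBlob(ctx, info, cache)
	if err != nil {
		return -1, err
	}
	defer stream.Close()
	return io.Copy(dst, stream)
}
```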
@@ -148,8 +227,7 @@ const (
// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
//
// There is a specific required order for some of the calls:
// PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
// ReapplyBlob, if used, MUST only be called if HasBlob returned true for the same blob digest
// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
//
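The required ordering reads naturally as code. The following hypothetical push sequence assumes the vendored API in this diff (including that version's PutManifest, PutSignatures and Commit signatures); every name is a placeholder and error handling is kept minimal.

```go
package pushexample

import (
	"context"
	"io"

	"github.com/containers/image/types"
)

// pushImage honors the ordering rules above: blobs first (reused when
// possible), then the manifest, then signatures, and finally Commit.
func pushImage(ctx context.Context, dest types.ImageDestination, cache types.BlobInfoCache,
	layerInfos []types.BlobInfo, layerStreams []io.Reader,
	configStream io.Reader, configInfo types.BlobInfo,
	manifest []byte, signatures [][]byte) error {
	for i, info := range layerInfos {
		// Reuse an existing blob if the destination (helped by the cache) can.
		reused, _, err := dest.TryReusingBlob(ctx, info, cache, true)
		if err != nil {
			return err
		}
		if !reused {
			if _, err := dest.PutBlob(ctx, layerStreams[i], info, cache, false); err != nil {
				return err
			}
		}
	}
	// The config blob is uploaded with isConfig=true.
	if _, err := dest.PutBlob(ctx, configStream, configInfo, cache, true); err != nil {
		return err
	}
	// Manifest only after every blob it references.
	if err := dest.PutManifest(ctx, manifest); err != nil {
		return err
	}
	// Signatures only after the manifest they cover.
	if err := dest.PutSignatures(ctx, signatures); err != nil {
		return err
	}
	// Commit makes the stored components visible as an image.
	return dest.Commit(ctx)
}
```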
@@ -183,17 +261,21 @@ type ImageDestination interface {
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, isConfig bool) (BlobInfo, error)
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
HasBlob(ctx context.Context, info BlobInfo) (bool, int64, error)
// ReapplyBlob informs the image destination that a blob for which HasBlob previously returned true would have been passed to PutBlob if it had returned false. Like HasBlob and unlike PutBlob, the digest can not be empty. If the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree.
ReapplyBlob(ctx context.Context, info BlobInfo) (BlobInfo, error)
PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
HasThreadSafePutBlob() bool
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
// PutManifest writes manifest to the destination.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
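The PutBlob warning above implies that destinations hash the stream while storing it and only publish the blob once the digest checks out at EOF. A minimal sketch of that pattern, using the opencontainers go-digest package already vendored in this tree; verifyWhileStoring and store are illustrative names, not part of any library.

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

// verifyWhileStoring hashes the stream as it is stored and fails (so the
// caller can discard the partial data) if the computed digest does not match
// the expected one. store stands in for wherever the destination writes the blob.
func verifyWhileStoring(stream io.Reader, expected digest.Digest, store io.Writer) (digest.Digest, int64, error) {
	digester := digest.Canonical.Digester()
	// Everything written to store is also fed to the digester.
	size, err := io.Copy(io.MultiWriter(store, digester.Hash()), stream)
	if err != nil {
		return "", -1, err // incomplete data: the caller must delete whatever was stored
	}
	actual := digester.Digest()
	if expected != "" && actual != expected {
		return "", -1, errors.New("digest mismatch: refusing to make the blob available")
	}
	return actual, size, nil
}

func main() {
	var stored strings.Builder
	d, n, err := verifyWhileStoring(strings.NewReader("example blob"), "", &stored)
	fmt.Println(d, n, err)
}
```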
@@ -324,6 +406,30 @@ type DockerAuthConfig struct {
Password string
}

// OptionalBool is a boolean with an additional undefined value, which is meant
// to be used in the context of user input to distinguish between a
// user-specified value and a default value.
type OptionalBool byte

const (
// OptionalBoolUndefined indicates that the OptionalBool value hasn't been set.
OptionalBoolUndefined OptionalBool = iota
// OptionalBoolTrue represents the boolean true.
OptionalBoolTrue
// OptionalBoolFalse represents the boolean false.
OptionalBoolFalse
)

// NewOptionalBool converts the input bool into either OptionalBoolTrue or
// OptionalBoolFalse. The function is meant to avoid boilerplate code for users.
func NewOptionalBool(b bool) OptionalBool {
o := OptionalBoolFalse
if b == true {
o = OptionalBoolTrue
}
return o
}
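A usage sketch may help here: the helpers below are hypothetical and only show how a caller maps a tri-state command-line flag onto an OptionalBool (for example for DockerInsecureSkipTLSVerify, which a later hunk switches to this type) and how the undefined case falls back to a default.

```go
package flagexample

import "github.com/containers/image/types"

// applyTLSFlag maps a CLI-style --tls-verify flag onto an OptionalBool-valued
// field: only an explicitly set flag overrides the per-registry default.
// The flag variables are illustrative, not part of the containers/image API.
func applyTLSFlag(flagWasSet bool, flagValue bool) types.OptionalBool {
	if !flagWasSet {
		return types.OptionalBoolUndefined // keep the default behavior
	}
	// Note the inversion: --tls-verify=false means "skip TLS verification".
	return types.NewOptionalBool(!flagValue)
}

// resolve turns the tri-state back into a concrete bool given a default.
func resolve(v types.OptionalBool, def bool) bool {
	switch v {
	case types.OptionalBoolTrue:
		return true
	case types.OptionalBoolFalse:
		return false
	default:
		return def
	}
}
```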
// SystemContext allows parameterizing access to implicitly-accessed resources,
// like configuration files in /etc and users' login state in their home directory.
// Various components can share the same field only if their semantics is exactly
@@ -351,6 +457,8 @@ type SystemContext struct {
ArchitectureChoice string
// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
OSChoice string
// If not "", overrides the system's default directory containing a blob info cache.
BlobInfoCacheDir string

// Additional tags when creating or copying a docker-archive.
DockerArchiveAdditionalTags []reference.NamedTagged
@@ -376,7 +484,7 @@ type SystemContext struct {
// Ignored if DockerCertPath is non-empty.
DockerPerHostCertDirPath string
// Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
DockerInsecureSkipTLSVerify bool
DockerInsecureSkipTLSVerify OptionalBool
// if nil, the library tries to parse ~/.docker/config.json to retrieve credentials
DockerAuthConfig *DockerAuthConfig
// if not "", a User-Agent header is added to each request when contacting a registry.
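A small, hypothetical example of filling the new SystemContext fields together; the cache directory path is made up for illustration and is not a documented default.

```go
package main

import (
	"fmt"

	"github.com/containers/image/types"
)

func main() {
	// Caller-side configuration using the fields added above.
	sys := &types.SystemContext{
		OSChoice:                    "linux",
		BlobInfoCacheDir:            "/var/tmp/blob-info-cache", // example path only
		DockerInsecureSkipTLSVerify: types.OptionalBoolFalse,    // explicitly require TLS verification
	}
	fmt.Println(sys.OSChoice, sys.BlobInfoCacheDir)
}
```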
vendor/github.com/containers/image/vendor.conf (13 changes, generated, vendored)
@@ -16,7 +16,7 @@ github.com/imdario/mergo 50d4dbd4eb0e84778abe37cefef140271d96fade
github.com/mattn/go-runewidth 14207d285c6c197daabb5c9793d63e7af9ab2d50
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc
github.com/opencontainers/go-digest c9281466c8b2f606084ac71339773efd177436e7
github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runc 6b1d0e76f239ffb435445e5ae316d2676c07c6e3
github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
@@ -26,20 +26,25 @@ github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/vbatts/tar-split v0.10.2
golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8
golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe
golang.org/x/sync 42b317875d0fa942474b76e1b46a6060d720ae6e
golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08
gopkg.in/cheggaaa/pb.v1 d7e6ca3010b6f084d8056847f55d7f572f180678
gopkg.in/cheggaaa/pb.v1 v1.0.27
gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a
k8s.io/client-go bcde30fb7eaed76fd98a36b4120321b94995ffb6
github.com/xeipuuv/gojsonschema master
github.com/xeipuuv/gojsonreference master
github.com/xeipuuv/gojsonpointer master
github.com/tchap/go-patricia v2.2.6
github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d
github.com/opencontainers/selinux 077c8b6d1c18456fb7c792bc0de52295a0d1900e
github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
github.com/ostreedev/ostree-go 56f3a639dbc0f2f5051c6d52dade28a882ba78ce
github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8
github.com/pquerna/ffjson master
github.com/syndtr/gocapability master
github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175
github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a
github.com/ulikunitz/xz v0.5.4
github.com/boltdb/bolt master
github.com/klauspost/pgzip v1.2.1
github.com/klauspost/compress v1.4.1
github.com/klauspost/cpuid v1.2.0
vendor/github.com/containers/storage/README.md (2 changes, generated, vendored)
@@ -2,7 +2,7 @@
layers, container images, and containers. A `containers-storage` CLI wrapper
is also included for manual and scripting use.

To build the CLI wrapper, use 'make build-binary'.
To build the CLI wrapper, use 'make binary'.

Operations which use VMs expect to launch them using 'vagrant', defaulting to
using its 'libvirt' provider. The boxes used are also available for the
vendor/github.com/containers/storage/containers.go (33 changes, generated, vendored)
@@ -133,6 +133,27 @@ func copyContainer(c *Container) *Container {
}
}

func (c *Container) MountLabel() string {
if label, ok := c.Flags["MountLabel"].(string); ok {
return label
}
return ""
}

func (c *Container) ProcessLabel() string {
if label, ok := c.Flags["ProcessLabel"].(string); ok {
return label
}
return ""
}

func (c *Container) MountOpts() []string {
if mountOpts, ok := c.Flags["MountOpts"].([]string); ok {
return mountOpts
}
return nil
}

func (r *containerStore) Containers() ([]Container, error) {
containers := make([]Container, len(r.containers))
for i := range r.containers {
@@ -192,6 +213,9 @@ func (r *containerStore) Load() error {
}

func (r *containerStore) Save() error {
if !r.Locked() {
return errors.New("container store is not locked")
}
rpath := r.containerspath()
if err := os.MkdirAll(filepath.Dir(rpath), 0700); err != nil {
return err
@@ -276,6 +300,9 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
if _, idInUse := r.byid[id]; idInUse {
return nil, ErrDuplicateID
}
if options.MountOpts != nil {
options.Flags["MountOpts"] = append([]string{}, options.MountOpts...)
}
names = dedupeNames(names)
for _, name := range names {
if _, nameInUse := r.byname[name]; nameInUse {
@@ -294,7 +321,7 @@ func (r *containerStore) Create(id string, names []string, image, layer, metadat
BigDataSizes: make(map[string]int64),
BigDataDigests: make(map[string]digest.Digest),
Created: time.Now().UTC(),
Flags: make(map[string]interface{}),
Flags: copyStringInterfaceMap(options.Flags),
UIDMap: copyIDMap(options.UIDMap),
GIDMap: copyIDMap(options.GIDMap),
}
@@ -560,3 +587,7 @@ func (r *containerStore) IsReadWrite() bool {
func (r *containerStore) TouchedSince(when time.Time) bool {
return r.lockfile.TouchedSince(when)
}

func (r *containerStore) Locked() bool {
return r.lockfile.Locked()
}
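The new accessors and the Save guard follow two common patterns: typed reads out of the generic Flags map, and refusing to persist state unless the store lock is held. Below is a simplified, stand-alone sketch of both; flagStore and its lock bookkeeping are illustrative, not the real containers/storage lockfile.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// flagStore mimics the pattern above: mutating operations should only run
// while the caller holds the store lock, and Save refuses to proceed otherwise.
type flagStore struct {
	mu     sync.Mutex
	locked bool
	flags  map[string]interface{}
}

func (s *flagStore) Lock()   { s.mu.Lock(); s.locked = true }
func (s *flagStore) Unlock() { s.locked = false; s.mu.Unlock() }

// MountOpts shows the typed-accessor pattern used by Container.MountOpts():
// a type assertion against the generic Flags map, with a nil fallback.
func (s *flagStore) MountOpts() []string {
	if v, ok := s.flags["MountOpts"].([]string); ok {
		return v
	}
	return nil
}

func (s *flagStore) Save() error {
	if !s.locked {
		return errors.New("store is not locked")
	}
	// ... write state to disk here ...
	return nil
}

func main() {
	s := &flagStore{flags: map[string]interface{}{"MountOpts": []string{"nodev"}}}
	fmt.Println(s.Save()) // error: store is not locked
	s.Lock()
	defer s.Unlock()
	fmt.Println(s.MountOpts(), s.Save())
}
```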
vendor/github.com/containers/storage/containers_ffjson.go (1 change, generated, vendored)
@@ -1,6 +1,5 @@
// Code generated by ffjson <https://github.com/pquerna/ffjson>. DO NOT EDIT.
// source: containers.go
//

package storage
vendor/github.com/containers/storage/drivers/aufs/aufs.go (60 changes, generated, vendored)
@@ -42,6 +42,7 @@ import (
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/locker"
mountpk "github.com/containers/storage/pkg/mount"
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/selinux/go-selinux/label"
@@ -77,6 +78,7 @@ type Driver struct {
pathCache map[string]string
naiveDiff graphdriver.DiffDriver
locker *locker.Locker
mountOptions string
}

// Init returns a new AUFS driver.
@@ -103,6 +105,20 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "AUFS is not supported over %q", backingFs)
}

var mountOptions string
for _, option := range options {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil {
return nil, err
}
key = strings.ToLower(key)
switch key {
case "aufs.mountopt":
mountOptions = val
default:
return nil, fmt.Errorf("option %s not supported", option)
}
}
paths := []string{
"mnt",
"diff",
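For context, "aufs.mountopt" arrives as a key=value storage option. The helper below is a simplified stand-in for parsers.ParseKeyValueOpt, only meant to show the expected shape of the option string; the value in main is an example, not a recommended setting.

```go
package main

import (
	"fmt"
	"strings"
)

// parseKeyValueOpt mirrors the idea behind parsers.ParseKeyValueOpt for
// storage options such as "aufs.mountopt=nodev,noatime": split on the first '='.
func parseKeyValueOpt(opt string) (string, string, error) {
	parts := strings.SplitN(opt, "=", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("unable to parse key/value option: %s", opt)
	}
	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
}

func main() {
	// The kind of option a user might set in storage configuration;
	// only "aufs.mountopt" is accepted by the Init hunk above.
	key, val, err := parseKeyValueOpt("aufs.mountopt=nodev,noatime")
	fmt.Println(key, val, err) // aufs.mountopt nodev,noatime <nil>
}
```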
@@ -110,12 +126,13 @@ func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap
}

a := &Driver{
root: root,
uidMaps: uidMaps,
gidMaps: gidMaps,
pathCache: make(map[string]string),
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)),
locker: locker.New(),
root: root,
uidMaps: uidMaps,
gidMaps: gidMaps,
pathCache: make(map[string]string),
ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)),
locker: locker.New(),
mountOptions: mountOptions,
}

rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
@@ -388,7 +405,7 @@ func atomicRemove(source string) error {
case os.IsExist(err):
// Got error saying the target dir already exists, maybe the source doesn't exist due to a previous (failed) remove
if _, e := os.Stat(source); !os.IsNotExist(e) {
return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up")
return errors.Wrapf(err, "target rename dir '%s' exists but should not, this needs to be manually cleaned up", target)
}
default:
return errors.Wrapf(err, "error preparing atomic delete")
@@ -399,7 +416,7 @@ func atomicRemove(source string) error {
// Get returns the rootfs path for the id.
// This will mount the dir at its given path
func (a *Driver) Get(id, mountLabel string) (string, error) {
func (a *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
a.locker.Lock(id)
defer a.locker.Unlock(id)
parents, err := a.getParentLayerPaths(id)
@@ -424,7 +441,7 @@ func (a *Driver) Get(id, mountLabel string) (string, error) {
// If a dir does not have a parent (no layers), do not try to mount;
// just return the diff path to the data
if len(parents) > 0 {
if err := a.mount(id, m, mountLabel, parents); err != nil {
if err := a.mount(id, m, parents, options); err != nil {
return "", err
}
}
@@ -568,7 +585,7 @@ func (a *Driver) getParentLayerPaths(id string) ([]string, error) {
return layers, nil
}

func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error {
func (a *Driver) mount(id string, target string, layers []string, options graphdriver.MountOpts) error {
a.Lock()
defer a.Unlock()
@@ -579,7 +596,7 @@ func (a *Driver) mount(id string, target string, mountLabel string, layers []str
rw := a.getDiffPath(id)

if err := a.aufsMount(layers, rw, target, mountLabel); err != nil {
if err := a.aufsMount(layers, rw, target, options); err != nil {
return fmt.Errorf("error creating aufs mount to %s: %v", target, err)
}
return nil
@@ -626,7 +643,7 @@ func (a *Driver) Cleanup() error {
return mountpk.Unmount(a.root)
}

func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) {
func (a *Driver) aufsMount(ro []string, rw, target string, options graphdriver.MountOpts) (err error) {
defer func() {
if err != nil {
Unmount(target)
@@ -640,7 +657,7 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
if useDirperm() {
offset += len(",dirperm1")
}
b := make([]byte, unix.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel
b := make([]byte, unix.Getpagesize()-len(options.MountLabel)-offset) // room for xino & mountLabel
bp := copy(b, fmt.Sprintf("br:%s=rw", rw))

index := 0
@@ -653,17 +670,25 @@ func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err erro
}

opts := "dio,xino=/dev/shm/aufs.xino"
mountOptions := a.mountOptions
if len(options.Options) > 0 {
mountOptions = strings.Join(options.Options, ",")
}
if mountOptions != "" {
opts += fmt.Sprintf(",%s", mountOptions)
}

if useDirperm() {
opts += ",dirperm1"
}
data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel)
data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), options.MountLabel)
if err = mount("none", target, "aufs", 0, data); err != nil {
return
}

for ; index < len(ro); index++ {
layer := fmt.Sprintf(":%s=ro+wh", ro[index])
data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel)
data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), options.MountLabel)
if err = mount("none", target, "aufs", unix.MS_REMOUNT, data); err != nil {
return
}
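The hunk above merges the driver-wide aufs.mountopt default with per-mount options and appends whichever wins to the base aufs option string. Here is a stand-alone sketch of just that merging logic, with local names in place of the driver fields:

```go
package main

import (
	"fmt"
	"strings"
)

// buildMountData mirrors the merging logic above: per-mount options override
// the driver-wide default, and the result is appended to the base aufs options.
func buildMountData(base string, driverDefault string, perMount []string) string {
	merged := driverDefault
	if len(perMount) > 0 {
		merged = strings.Join(perMount, ",")
	}
	if merged != "" {
		base += "," + merged
	}
	return base
}

func main() {
	fmt.Println(buildMountData("dio,xino=/dev/shm/aufs.xino", "noatime", nil))
	fmt.Println(buildMountData("dio,xino=/dev/shm/aufs.xino", "noatime", []string{"nodev", "ro"}))
}
```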
@@ -707,3 +732,8 @@ func useDirperm() bool {
func (a *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
return fmt.Errorf("aufs doesn't support changing ID mappings")
}

// SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs in a userNS
func (a *Driver) SupportsShifting() bool {
return false
}
vendor/github.com/containers/storage/drivers/btrfs/btrfs.go (7 changes, generated, vendored)
@@ -110,6 +110,8 @@ func parseOptions(opt []string) (btrfsOptions, bool, error) {
}
userDiskQuota = true
options.minSpace = uint64(minSpace)
case "btrfs.mountopt":
return options, userDiskQuota, fmt.Errorf("btrfs driver does not support mount options")
default:
return options, userDiskQuota, fmt.Errorf("Unknown option %s", key)
}
@@ -632,12 +634,15 @@ func (d *Driver) Remove(id string) error {
}

// Get the requested filesystem id.
func (d *Driver) Get(id, mountLabel string) (string, error) {
func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
dir := d.subvolumesDirID(id)
st, err := os.Stat(dir)
if err != nil {
return "", err
}
if len(options.Options) > 0 {
return "", fmt.Errorf("btrfs driver does not support mount options")
}

if !st.IsDir() {
return "", fmt.Errorf("%s: not a directory", dir)
vendor/github.com/containers/storage/drivers/chown.go (10 changes, generated, vendored)
@@ -114,7 +114,10 @@ func NewNaiveLayerIDMapUpdater(driver ProtoDriver) LayerIDMapUpdater {
// same "container" IDs.
func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
driver := n.ProtoDriver
layerFs, err := driver.Get(id, mountLabel)
options := MountOpts{
MountLabel: mountLabel,
}
layerFs, err := driver.Get(id, options)
if err != nil {
return err
}
@@ -124,3 +127,8 @@ func (n *naiveLayerIDMapUpdater) UpdateLayerIDMap(id string, toContainer, toHost
return ChownPathByMaps(layerFs, toContainer, toHost)
}

// SupportsShifting tells whether the driver supports shifting of the UIDs/GIDs in a userNS
func (n *naiveLayerIDMapUpdater) SupportsShifting() bool {
return false
}
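Across these storage hunks the graph driver Get call changes from a bare mount label to a MountOpts struct, so new per-mount settings can be added without changing every driver signature again. The sketch below uses a local copy of that struct (only the two fields exercised in this diff) and a stand-in get function to show the caller-side migration; the path format in get is illustrative.

```go
package main

import "fmt"

// MountOpts is a local, illustrative copy of the struct used above; the real
// definition lives in containers/storage's drivers package.
type MountOpts struct {
	MountLabel string
	Options    []string
}

// get stands in for a graph driver's Get method.
func get(id string, opts MountOpts) (string, error) {
	return fmt.Sprintf("/var/lib/storage/mnt/%s (label=%q opts=%v)", id, opts.MountLabel, opts.Options), nil
}

func main() {
	// Old style: driver.Get(id, mountLabel)
	// New style: wrap the label in MountOpts, as UpdateLayerIDMap does above.
	path, err := get("layer123", MountOpts{MountLabel: "system_u:object_r:container_file_t:s0"})
	fmt.Println(path, err)
}
```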
Some files were not shown because too many files have changed in this diff.