Mirror of https://github.com/containers/skopeo.git (synced 2026-02-01 14:58:59 +00:00)

Compare commits (89 commits)
62e3747a11  882ba36ef8  3b699c5248  b82945b689  3851d89b17  4360db9f6d
355de6c757  ceacd8d885  bc36bb416b  e86ff0e79d  590703db95  727212b12f
593bdfe098  ba22d17d1f  0caee746fb  d2d41ebc33  9272b5177e  62967259a4
224b54c367  0734c4ccb3  6b9345a5f9  ff5694b1a6  467a574e34  4043ecf922
e052488674  52aade5356  1491651ea9  d969934fa4  bda45f0d60  96e579720e
9d88725a97  def5f4a11a  c4275519ae  b164a261cf  f89bd82dcd  ab4912a5a1
85d737fc29  0224d8cd38  f0730043c6  e0efa0c2b3  b9826f0c42  94d6767d07
226dc99ad4  eea384cdf7  76f5c6d4c5  79ef111398  af2998040a  1f6c140716
b08008c5b2  c011e81b38  683d45ffd6  07d6e7db03  02ea29a99d  f1849c6a47
03bb3c2f74  fdf0bec556  c736e69d48  d7156f9b3d  15b3fdf6f4  0e1ba1fb70
ee590a9795  076d41d627  4830d90c32  2f8cc39a1a  8602471486  1ee74864e9
81404fb71c  845ad88cec  9b6b57df50  fefeeb4c70  bbc0c69624  dcfcfdaa1e
e41b0d67d6  1ec992abd1  b8ae5c6054  7c530bc55f  3377542e27  cf5d9ffa49
686c3fcd7a  78a24cea81  fd93ebb78d  56dd3fc928  d830304e6d  4960f390e2
9ba6dd71d7  dd6441b546  09cc6c3199  7f7b648443  cc571eb1ea
CONTRIBUTING.md
@@ -8,7 +8,7 @@ that we follow.
 * [Reporting Issues](#reporting-issues)
 * [Submitting Pull Requests](#submitting-pull-requests)
 * [Communications](#communications)
-* [Becomign a Maintainer](#becoming-a-maintainer)
+* [Becoming a Maintainer](#becoming-a-maintainer)
 
 ## Reporting Issues
Dockerfile
@@ -1,6 +1,9 @@
 FROM fedora
 
 RUN dnf -y update && dnf install -y make git golang golang-github-cpuguy83-go-md2man \
+	# storage deps
+	btrfs-progs-devel \
+	device-mapper-devel \
 	# gpgme bindings deps
 	libassuan-devel gpgme-devel \
 	gnupg \
@@ -43,7 +46,7 @@ RUN set -x \
 RUN set -x \
 	&& yum install -y which git tar wget hostname util-linux bsdtar socat ethtool device-mapper iptables tree findutils nmap-ncat e2fsprogs xfsprogs lsof docker iproute \
 	&& export GOPATH=$(mktemp -d) \
-	&& git clone -b v1.3.0-alpha.3 git://github.com/openshift/origin "$GOPATH/src/github.com/openshift/origin" \
+	&& git clone -b v1.5.0-alpha.3 git://github.com/openshift/origin "$GOPATH/src/github.com/openshift/origin" \
 	&& (cd "$GOPATH/src/github.com/openshift/origin" && make clean build && make all WHAT=cmd/dockerregistry) \
 	&& cp -a "$GOPATH/src/github.com/openshift/origin/_output/local/bin/linux"/*/* /usr/local/bin \
 	&& cp "$GOPATH/src/github.com/openshift/origin/images/dockerregistry/config.yml" /atomic-registry-config.yml \
Dockerfile.build
@@ -1,5 +1,5 @@
 FROM ubuntu:16.04
 RUN apt-get update
-RUN apt-get install -y golang git-core libgpgme11-dev go-md2man
+RUN apt-get install -y golang btrfs-tools git-core libdevmapper-dev libgpgme11-dev go-md2man
 ENV GOPATH=/
 WORKDIR /src/github.com/projectatomic/skopeo
Makefile (15 lines changed)
@@ -9,7 +9,7 @@ CONTAINERSSYSCONFIGDIR=${DESTDIR}/etc/containers
 REGISTRIESDDIR=${CONTAINERSSYSCONFIGDIR}/registries.d
 SIGSTOREDIR=${DESTDIR}/var/lib/atomic/sigstore
 BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions
-GO_MD2MAN ?= /usr/bin/go-md2man
+GO_MD2MAN ?= go-md2man
 
 ifeq ($(DEBUG), 1)
 override GOGCFLAGS += -N -l
@@ -32,6 +32,11 @@ GIT_COMMIT := $(shell git rev-parse HEAD 2> /dev/null || true)
 
 MANPAGES_MD = $(wildcard docs/*.md)
 
+BTRFS_BUILD_TAG = $(shell hack/btrfs_tag.sh)
+LIBDM_BUILD_TAG = $(shell hack/libdm_tag.sh)
+LOCAL_BUILD_TAGS = $(BTRFS_BUILD_TAG) $(LIBDM_BUILD_TAG)
+BUILDTAGS += $(LOCAL_BUILD_TAGS)
+
 # make all DEBUG=1
 # Note: Uses the -N -l go compiler options to disable compiler optimizations
 #       and inlining. Using these build options allows you to subsequently
@@ -43,12 +48,12 @@ all: binary docs
 binary: cmd/skopeo
 	docker build ${DOCKER_BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
 	docker run --rm --security-opt label:disable -v $$(pwd):/src/github.com/projectatomic/skopeo \
-		skopeobuildimage make binary-local $(if $(DEBUG),DEBUG=$(DEBUG))
+		skopeobuildimage make binary-local $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
 
 binary-static: cmd/skopeo
 	docker build ${DOCKER_BUILD_ARGS} -f Dockerfile.build -t skopeobuildimage .
 	docker run --rm --security-opt label:disable -v $$(pwd):/src/github.com/projectatomic/skopeo \
-		skopeobuildimage make binary-local-static $(if $(DEBUG),DEBUG=$(DEBUG))
+		skopeobuildimage make binary-local-static $(if $(DEBUG),DEBUG=$(DEBUG)) BUILDTAGS='$(BUILDTAGS)'
 
 # Build w/o using Docker containers
 binary-local:
@@ -90,11 +95,11 @@ check: validate test-unit test-integration
 
 # The tests can run out of entropy and block in containers, so replace /dev/random.
 test-integration: build-container
-	$(DOCKER_RUN_DOCKER) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 hack/make.sh test-integration'
+	$(DOCKER_RUN_DOCKER) bash -c 'rm -f /dev/random; ln -sf /dev/urandom /dev/random; SKOPEO_CONTAINER_TESTS=1 BUILDTAGS="$(BUILDTAGS)" hack/make.sh test-integration'
 
 test-unit: build-container
 	# Just call (make test unit-local) here instead of worrying about environment differences, e.g. GO15VENDOREXPERIMENT.
-	$(DOCKER_RUN_DOCKER) make test-unit-local
+	$(DOCKER_RUN_DOCKER) make test-unit-local BUILDTAGS='$(BUILDTAGS)'
 
 validate: build-container
 	$(DOCKER_RUN_DOCKER) hack/make.sh validate-git-marks validate-gofmt validate-lint validate-vet
README.md (16 lines changed)
@@ -151,22 +151,22 @@ CONTRIBUTING
 
 ### Dependencies management
 
-`skopeo` uses a custom bash script for dependencies management. This script is located at `hack/vendor.sh`.
+`skopeo` uses [`vndr`](https://github.com/LK4D4/vndr) for dependencies management.
 
 In order to add a new dependency to this project:
 
-- add a new line to `hack/vendor.sh` (e.g. `clone git github.com/pkg/errors master`)
-- run `hack/vendor.sh`
+- add a new line to `vendor.conf` according to `vndr` rules (e.g. `github.com/pkg/errors master`)
+- run `vndr github.com/pkg/errors`
 
-In order to update an existing dependency (or more):
+In order to update an existing dependency:
 
-- update the relevant dependency line in `hack/vendor.sh`
-- run `hack/vendor.sh`
+- update the relevant dependency line in `vendor.conf`
+- run `vndr github.com/pkg/errors`
 
 In order to test out new PRs from [containers/image](https://github.com/containers/image) to not break `skopeo`:
 
-- update `hack/vendor.sh`. Find out the `containers/image` dependency; update it to vendor from your own branch and your own repository fork (e.g. `clone git github.com/containers/image my-branch https://github.com/runcom/image`)
-- run `hack/vendor.sh`
+- update `vendor.conf`. Find out the `containers/image` dependency; update it to vendor from your own branch and your own repository fork (e.g. `github.com/containers/image my-branch https://github.com/runcom/image`)
+- run `vndr github.com/containers/image`
 
 License
 -
cmd/skopeo/copy.go
@@ -6,7 +6,7 @@ import (
 	"os"
 
 	"github.com/containers/image/copy"
-	"github.com/containers/image/transports"
+	"github.com/containers/image/transports/alltransports"
 	"github.com/containers/image/types"
 	"github.com/urfave/cli"
 )
@@ -37,11 +37,11 @@ func copyHandler(context *cli.Context) error {
 	}
 	defer policyContext.Destroy()
 
-	srcRef, err := transports.ParseImageName(context.Args()[0])
+	srcRef, err := alltransports.ParseImageName(context.Args()[0])
 	if err != nil {
 		return fmt.Errorf("Invalid source name %s: %v", context.Args()[0], err)
 	}
-	destRef, err := transports.ParseImageName(context.Args()[1])
+	destRef, err := alltransports.ParseImageName(context.Args()[1])
 	if err != nil {
 		return fmt.Errorf("Invalid destination name %s: %v", context.Args()[1], err)
 	}
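As context for the transports to alltransports switch above: the alltransports package registers every transport (docker:, dir:, oci:, atomic:, ...), so a single ParseImageName call resolves any prefix, while the plain transports package only knows about transports the binary registered itself. A minimal sketch, assuming the containers/image API of this era; the reference string is illustrative:

package main

import (
	"fmt"

	"github.com/containers/image/transports/alltransports"
)

func main() {
	// Parse a transport-qualified reference exactly as the handlers above do.
	ref, err := alltransports.ParseImageName("docker://busybox:latest")
	if err != nil {
		panic(err)
	}
	// ref is a types.ImageReference; its transport name here is "docker".
	fmt.Println(ref.Transport().Name(), ref.StringWithinTransport())
}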
cmd/skopeo/delete.go
@@ -4,7 +4,7 @@ import (
 	"errors"
 	"fmt"
 
-	"github.com/containers/image/transports"
+	"github.com/containers/image/transports/alltransports"
 	"github.com/urfave/cli"
 )
@@ -13,7 +13,7 @@ func deleteHandler(context *cli.Context) error {
 		return errors.New("Usage: delete imageReference")
 	}
 
-	ref, err := transports.ParseImageName(context.Args()[0])
+	ref, err := alltransports.ParseImageName(context.Args()[0])
 	if err != nil {
 		return fmt.Errorf("Invalid source name %s: %v", context.Args()[0], err)
 	}
cmd/skopeo/inspect.go
@@ -3,11 +3,14 @@ package main
 import (
 	"encoding/json"
 	"fmt"
+	"strings"
 	"time"
 
+	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/docker"
 	"github.com/containers/image/manifest"
-	"github.com/docker/distribution/digest"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
@@ -31,7 +34,7 @@ var inspectCmd = cli.Command{
 	ArgsUsage: "IMAGE-NAME",
 	Flags: []cli.Flag{
 		cli.StringFlag{
-			Name:  "cert-path",
+			Name:  "cert-dir",
 			Value: "",
 			Usage: "use certificates at `PATH` (*.crt, *.cert, *.key) to connect to the registry",
 		},
@@ -49,12 +52,17 @@ var inspectCmd = cli.Command{
 			Usage: "Use `USERNAME[:PASSWORD]` for accessing the registry",
 		},
 	},
-	Action: func(c *cli.Context) error {
+	Action: func(c *cli.Context) (retErr error) {
 		img, err := parseImage(c)
 		if err != nil {
 			return err
 		}
-		defer img.Close()
+
+		defer func() {
+			if err := img.Close(); err != nil {
+				retErr = errors.Wrapf(retErr, fmt.Sprintf("(could not close image: %v) ", err))
+			}
+		}()
 
 		rawManifest, _, err := img.Manifest()
 		if err != nil {
@@ -91,7 +99,13 @@ var inspectCmd = cli.Command{
 		outputData.Name = dockerImg.SourceRefFullName()
 		outputData.RepoTags, err = dockerImg.GetRepositoryTags()
 		if err != nil {
-			return fmt.Errorf("Error determining repository tags: %v", err)
+			// some registries may decide to block the "list all tags" endpoint
+			// gracefully allow the inspect to continue in this case. Currently
+			// the IBM Bluemix container registry has this restriction.
+			if !strings.Contains(err.Error(), "401") {
+				return fmt.Errorf("Error determining repository tags: %v", err)
+			}
+			logrus.Warnf("Registry disallows tag list retrieval; skipping")
 		}
 	}
 	out, err := json.MarshalIndent(outputData, "", "    ")
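The named-return pattern above (Action: func(c *cli.Context) (retErr error)) is what lets a failing Close surface instead of being silently dropped by a bare defer. A minimal standalone sketch, assuming github.com/pkg/errors and a generic io.ReadCloser rather than skopeo's image type:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"github.com/pkg/errors"
)

// consume mirrors the diff's pattern: the deferred Close can overwrite the
// named return value. Note that errors.Wrapf returns nil when retErr is nil,
// so a close failure on an otherwise successful call is still dropped, the
// same behavior as the code above.
func consume(r io.ReadCloser) (retErr error) {
	defer func() {
		if err := r.Close(); err != nil {
			retErr = errors.Wrapf(retErr, "(could not close: %v)", err)
		}
	}()
	_, err := io.Copy(ioutil.Discard, r)
	return err
}

func main() {
	fmt.Println(consume(ioutil.NopCloser(strings.NewReader("manifest"))))
}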
cmd/skopeo/layers.go
@@ -1,7 +1,6 @@
 package main
 
 import (
-	"errors"
 	"fmt"
 	"io/ioutil"
 	"os"
@@ -11,7 +10,8 @@ import (
 	"github.com/containers/image/image"
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
-	"github.com/docker/distribution/digest"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
 	"github.com/urfave/cli"
 )
@@ -20,7 +20,7 @@ var layersCmd = cli.Command{
 	Usage:     "Get layers of IMAGE-NAME",
 	ArgsUsage: "IMAGE-NAME [LAYER...]",
 	Hidden:    true,
-	Action: func(c *cli.Context) error {
+	Action: func(c *cli.Context) (retErr error) {
 		fmt.Fprintln(os.Stderr, `DEPRECATED: skopeo layers is deprecated in favor of skopeo copy`)
 		if c.NArg() == 0 {
 			return errors.New("Usage: layers imageReference [layer...]")
@@ -36,17 +36,24 @@ var layersCmd = cli.Command{
 		}
 		src, err := image.FromSource(rawSource)
 		if err != nil {
-			rawSource.Close()
+			if closeErr := rawSource.Close(); closeErr != nil {
+				return errors.Wrapf(err, " (close error: %v)", closeErr)
+			}
+
 			return err
 		}
-		defer src.Close()
+		defer func() {
+			if err := src.Close(); err != nil {
+				retErr = errors.Wrapf(retErr, " (close error: %v)", err)
+			}
+		}()
 
 		var blobDigests []digest.Digest
 		for _, dString := range c.Args().Tail() {
 			if !strings.HasPrefix(dString, "sha256:") {
 				dString = "sha256:" + dString
 			}
-			d, err := digest.ParseDigest(dString)
+			d, err := digest.Parse(dString)
 			if err != nil {
 				return err
 			}
@@ -80,7 +87,12 @@ var layersCmd = cli.Command{
 		if err != nil {
 			return err
 		}
-		defer dest.Close()
+
+		defer func() {
+			if err := dest.Close(); err != nil {
+				retErr = errors.Wrapf(retErr, " (close error: %v)", err)
+			}
+		}()
 
 		for _, digest := range blobDigests {
 			r, blobSize, err := rawSource.GetBlob(types.BlobInfo{Digest: digest, Size: -1})
@@ -88,10 +100,11 @@ var layersCmd = cli.Command{
 				return err
 			}
 			if _, err := dest.PutBlob(r, types.BlobInfo{Digest: digest, Size: blobSize}); err != nil {
-				r.Close()
+				if closeErr := r.Close(); closeErr != nil {
+					return errors.Wrapf(err, " (close error: %v)", closeErr)
+				}
 				return err
 			}
 			r.Close()
 		}
 
 		manifest, _, err := src.Manifest()
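The digest handling above normalizes bare hex to the "sha256:" form before parsing; digest.Parse is the go-digest replacement for the old ParseDigest. A small sketch of that normalization (the hex value is borrowed from the test fixtures further down, purely for illustration):

package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Users may pass bare hex on the command line; prepend the algorithm
	// before validating. digest.Parse checks the "<algorithm>:<hex>" form
	// and the hex length.
	dString := "20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55"
	if !strings.HasPrefix(dString, "sha256:") {
		dString = "sha256:" + dString
	}
	d, err := digest.Parse(dString)
	if err != nil {
		panic(err)
	}
	fmt.Println(d.Algorithm(), d.Hex())
}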
cmd/skopeo/main.go
@@ -6,6 +6,7 @@ import (
 
 	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/signature"
+	"github.com/containers/storage/pkg/reexec"
 	"github.com/projectatomic/skopeo/version"
 	"github.com/urfave/cli"
 )
@@ -40,6 +41,10 @@ func createApp() *cli.App {
 			Value: "",
 			Usage: "Path to a trust policy file",
 		},
+		cli.BoolFlag{
+			Name:  "insecure-policy",
+			Usage: "run the tool without any policy check",
+		},
 		cli.StringFlag{
 			Name:  "registries.d",
 			Value: "",
@@ -63,11 +68,15 @@ func createApp() *cli.App {
 		manifestDigestCmd,
 		standaloneSignCmd,
 		standaloneVerifyCmd,
+		untrustedSignatureDumpCmd,
 	}
 	return app
 }
 
 func main() {
+	if reexec.Init() {
+		return
+	}
 	app := createApp()
 	if err := app.Run(os.Args); err != nil {
 		logrus.Fatal(err)
@@ -79,7 +88,9 @@ func getPolicyContext(c *cli.Context) (*signature.PolicyContext, error) {
 	policyPath := c.GlobalString("policy")
 	var policy *signature.Policy // This could be cached across calls, if we had an application context.
 	var err error
-	if policyPath == "" {
+	if c.GlobalBool("insecure-policy") {
+		policy = &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
+	} else if policyPath == "" {
 		policy, err = signature.DefaultPolicy(nil)
 	} else {
 		policy, err = signature.NewPolicyFromFile(policyPath)
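What --insecure-policy constructs can be reproduced directly against the containers/image signature API. A hedged sketch of the equivalent library calls, using only constructors that appear in the diff above:

package main

import (
	"fmt"

	"github.com/containers/image/signature"
)

func main() {
	// Equivalent of running skopeo with --insecure-policy: a default policy
	// that accepts any image, bypassing signature verification entirely.
	policy := &signature.Policy{
		Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()},
	}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		panic(err)
	}
	defer pc.Destroy()
	fmt.Println("policy context ready")
}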
cmd/skopeo/signing.go
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"encoding/json"
 	"errors"
 	"fmt"
 	"io/ioutil"
@@ -27,6 +28,7 @@ func standaloneSign(context *cli.Context) error {
 	if err != nil {
 		return fmt.Errorf("Error initializing GPG: %v", err)
 	}
+	defer mech.Close()
 	signature, err := signature.SignDockerManifest(manifest, dockerReference, mech, fingerprint)
 	if err != nil {
 		return fmt.Errorf("Error creating signature: %v", err)
@@ -73,6 +75,7 @@ func standaloneVerify(context *cli.Context) error {
 	if err != nil {
 		return fmt.Errorf("Error initializing GPG: %v", err)
 	}
+	defer mech.Close()
 	sig, err := signature.VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest, expectedDockerReference, mech, expectedFingerprint)
 	if err != nil {
 		return fmt.Errorf("Error verifying signature: %v", err)
@@ -88,3 +91,40 @@ var standaloneVerifyCmd = cli.Command{
 	ArgsUsage: "MANIFEST DOCKER-REFERENCE KEY-FINGERPRINT SIGNATURE",
 	Action:    standaloneVerify,
 }
+
+func untrustedSignatureDump(context *cli.Context) error {
+	if len(context.Args()) != 1 {
+		return errors.New("Usage: skopeo untrusted-signature-dump-without-verification signature")
+	}
+	untrustedSignaturePath := context.Args()[0]
+
+	untrustedSignature, err := ioutil.ReadFile(untrustedSignaturePath)
+	if err != nil {
+		return fmt.Errorf("Error reading untrusted signature from %s: %v", untrustedSignaturePath, err)
+	}
+
+	untrustedInfo, err := signature.GetUntrustedSignatureInformationWithoutVerifying(untrustedSignature)
+	if err != nil {
+		return fmt.Errorf("Error decoding untrusted signature: %v", err)
+	}
+	untrustedOut, err := json.MarshalIndent(untrustedInfo, "", "    ")
+	if err != nil {
+		return err
+	}
+	fmt.Fprintln(context.App.Writer, string(untrustedOut))
+	return nil
+}
+
+// WARNING: Do not use the contents of this for ANY security decisions,
+// and be VERY CAREFUL about showing this information to humans in any way which suggest that these values “are probably” reliable.
+// There is NO REASON to expect the values to be correct, or not intentionally misleading
+// (including things like “✅ Verified by $authority”)
+//
+// The subcommand is undocumented, and it may be renamed or entirely disappear in the future.
+var untrustedSignatureDumpCmd = cli.Command{
+	Name:      "untrusted-signature-dump-without-verification",
+	Usage:     "Dump contents of a signature WITHOUT VERIFYING IT",
+	ArgsUsage: "SIGNATURE",
+	Hidden:    true,
+	Action:    untrustedSignatureDump,
+}
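The same dump can be performed from Go. A minimal sketch using only calls that appear in the diff above; the fixture path is illustrative, and the printed values are untrusted by construction, exactly as the WARNING comment says:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/containers/image/signature"
)

func main() {
	// Read a raw signature blob and print its claimed (unverified!) fields,
	// mirroring what the hidden subcommand above does.
	blob, err := ioutil.ReadFile("fixtures/image.signature")
	if err != nil {
		panic(err)
	}
	info, err := signature.GetUntrustedSignatureInformationWithoutVerifying(blob)
	if err != nil {
		panic(err)
	}
	fmt.Println(info.UntrustedDockerReference, info.UntrustedDockerManifestDigest)
}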
cmd/skopeo/signing_test.go
@@ -1,12 +1,14 @@
 package main
 
 import (
+	"encoding/json"
 	"io/ioutil"
 	"os"
 	"testing"
+	"time"
 
 	"github.com/containers/image/signature"
-	"github.com/docker/distribution/digest"
+	"github.com/opencontainers/go-digest"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -16,6 +18,8 @@ const (
 	fixturesTestImageManifestDigest = digest.Digest("sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55")
 	// fixturesTestKeyFingerprint is the fingerprint of the private key.
 	fixturesTestKeyFingerprint = "1D8230F6CDB6A06716E414C1DB72F2188BB46CC8"
+	// fixturesTestKeyShortID is the key ID of the private key.
+	fixturesTestKeyShortID = "DB72F2188BB46CC8"
 )
 
 // Test that results of runSkopeo failed with nothing on stdout, and substring
@@ -27,6 +31,13 @@ func assertTestFailed(t *testing.T, stdout string, err error, substring string)
 }
 
 func TestStandaloneSign(t *testing.T) {
+	mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
+	require.NoError(t, err)
+	defer mech.Close()
+	if err := mech.SupportsSigning(); err != nil {
+		t.Skipf("Signing not supported: %v", err)
+	}
+
 	manifestPath := "fixtures/image.manifest.json"
 	dockerReference := "testing/manifest"
 	os.Setenv("GNUPGHOME", "fixtures")
@@ -55,7 +66,7 @@ func TestStandaloneSign(t *testing.T) {
 		manifestPath, "" /* empty reference */, fixturesTestKeyFingerprint)
 	assertTestFailed(t, out, err, "empty signature content")
 
-	// Unknown key. (FIXME? The error is 'Error creating signature: End of file")
+	// Unknown key.
 	out, err = runSkopeo("standalone-sign", "-o", "/dev/null",
 		manifestPath, dockerReference, "UNKNOWN GPG FINGERPRINT")
 	assert.Error(t, err)
@@ -72,17 +83,18 @@ func TestStandaloneSign(t *testing.T) {
 	defer os.Remove(sigOutput.Name())
 	out, err = runSkopeo("standalone-sign", "-o", sigOutput.Name(),
 		manifestPath, dockerReference, fixturesTestKeyFingerprint)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	assert.Empty(t, out)
 
 	sig, err := ioutil.ReadFile(sigOutput.Name())
 	require.NoError(t, err)
 	manifest, err := ioutil.ReadFile(manifestPath)
 	require.NoError(t, err)
-	mech, err := signature.NewGPGSigningMechanism()
+	mech, err = signature.NewGPGSigningMechanism()
 	require.NoError(t, err)
 	defer mech.Close()
 	verified, err := signature.VerifyDockerManifestSignature(sig, manifest, dockerReference, mech, fixturesTestKeyFingerprint)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	assert.Equal(t, dockerReference, verified.DockerReference)
 	assert.Equal(t, fixturesTestImageManifestDigest, verified.DockerManifestDigest)
 }
@@ -125,3 +137,42 @@ func TestStandaloneVerify(t *testing.T) {
 	assert.NoError(t, err)
 	assert.Equal(t, "Signature verified, digest "+fixturesTestImageManifestDigest.String()+"\n", out)
 }
+
+func TestUntrustedSignatureDump(t *testing.T) {
+	// Invalid command-line arguments
+	for _, args := range [][]string{
+		{},
+		{"a1", "a2"},
+		{"a1", "a2", "a3", "a4"},
+	} {
+		out, err := runSkopeo(append([]string{"untrusted-signature-dump-without-verification"}, args...)...)
+		assertTestFailed(t, out, err, "Usage")
+	}
+
+	// Error reading the signature file
+	out, err := runSkopeo("untrusted-signature-dump-without-verification",
+		"/this/doesnt/exist")
+	assertTestFailed(t, out, err, "/this/doesnt/exist")
+
+	// Error decoding the signature (input is not a signature)
+	out, err = runSkopeo("untrusted-signature-dump-without-verification", "fixtures/image.manifest.json")
+	assertTestFailed(t, out, err, "Error decoding untrusted signature")
+
+	// Success
+	for _, path := range []string{"fixtures/image.signature", "fixtures/corrupt.signature"} {
+		out, err = runSkopeo("untrusted-signature-dump-without-verification", path)
+		require.NoError(t, err)
+
+		var info signature.UntrustedSignatureInformation
+		err := json.Unmarshal([]byte(out), &info)
+		require.NoError(t, err)
+		assert.Equal(t, fixturesTestImageManifestDigest, info.UntrustedDockerManifestDigest)
+		assert.Equal(t, "testing/manifest", info.UntrustedDockerReference)
+		assert.NotNil(t, info.UntrustedCreatorID)
+		assert.Equal(t, "atomic ", *info.UntrustedCreatorID)
+		assert.NotNil(t, info.UntrustedTimestamp)
+		assert.True(t, time.Unix(1458239713, 0).Equal(*info.UntrustedTimestamp))
+		assert.Equal(t, fixturesTestKeyShortID, info.UntrustedShortKeyIdentifier)
+	}
+}
cmd/skopeo/utils.go
@@ -4,7 +4,7 @@ import (
 	"errors"
 	"strings"
 
-	"github.com/containers/image/transports"
+	"github.com/containers/image/transports/alltransports"
 	"github.com/containers/image/types"
 	"github.com/urfave/cli"
 )
@@ -59,7 +59,7 @@ func getDockerAuth(creds string) (*types.DockerAuthConfig, error) {
 // The caller must call .Close() on the returned Image.
 func parseImage(c *cli.Context) (types.Image, error) {
 	imgName := c.Args().First()
-	ref, err := transports.ParseImageName(imgName)
+	ref, err := alltransports.ParseImageName(imgName)
 	if err != nil {
 		return nil, err
 	}
@@ -74,7 +74,7 @@ func parseImage(c *cli.Context) (types.Image, error) {
 // requestedManifestMIMETypes is as in types.ImageReference.NewImageSource.
 // The caller must call .Close() on the returned ImageSource.
 func parseImageSource(c *cli.Context, name string, requestedManifestMIMETypes []string) (types.ImageSource, error) {
-	ref, err := transports.ParseImageName(name)
+	ref, err := alltransports.ParseImageName(name)
 	if err != nil {
 		return nil, err
 	}
completions/bash/skopeo
@@ -23,10 +23,10 @@ _skopeo_copy() {
 	local options_with_args="
 	--sign-by
 	--src-creds --screds
-	--src-cert-path
+	--src-cert-dir
 	--src-tls-verify
 	--dest-creds --dcreds
-	--dest-cert-path
+	--dest-cert-dir
 	--dest-tls-verify
 	"
 	local boolean_options="
@@ -38,7 +38,7 @@ _skopeo_copy() {
 _skopeo_inspect() {
 	local options_with_args="
 	--creds
-	--cert-path
+	--cert-dir
 	"
 	local boolean_options="
 	--raw
@@ -75,7 +75,7 @@ _skopeo_manifest_digest() {
 _skopeo_delete() {
 	local options_with_args="
 	--creds
-	--cert-path
+	--cert-dir
 	"
 	local boolean_options="
 	--tls-verify
@@ -86,7 +86,7 @@ _skopeo_delete() {
 _skopeo_layers() {
 	local options_with_args="
 	--creds
-	--cert-path
+	--cert-dir
 	"
 	local boolean_options="
 	--tls-verify
@@ -100,6 +100,7 @@ _skopeo_skopeo() {
 	--registries.d
 	"
 	local boolean_options="
+	--insecure-policy
 	--debug
 	--version -v
 	--help -h
docs/skopeo.1.md
@@ -39,6 +39,8 @@ Most commands refer to container images, using a _transport_`:`_details_ format.
 
 **--policy** _path-to-policy_ Path to a policy.json file to use for verifying signatures and deciding whether an image is trusted, overriding the default trust policy file.
 
+**--insecure-policy** Adopt an insecure, permissive policy that allows anything. This obviates the need for a policy file.
+
 **--registries.d** _dir_ use registry configuration files in _dir_ (e.g. for docker signature storage), overriding the default path.
 
 **--help**|**-h** Show help
hack/.vendor-helpers.sh (deleted file)
@@ -1,115 +0,0 @@
-#!/usr/bin/env bash
-
-PROJECT=github.com/projectatomic/skopeo
-
-# Downloads dependencies into vendor/ directory
-mkdir -p vendor
-
-original_GOPATH=$GOPATH
-export GOPATH="${PWD}/vendor:$GOPATH"
-
-find="/usr/bin/find"
-
-clone() {
-	local delete_vendor=true
-	if [ "x$1" = x--keep-vendor ]; then
-		delete_vendor=false
-		shift
-	fi
-
-	local vcs="$1"
-	local pkg="$2"
-	local rev="$3"
-	local url="$4"
-
-	: ${url:=https://$pkg}
-	local target="vendor/src/$pkg"
-
-	echo -n "$pkg @ $rev: "
-
-	if [ -d "$target" ]; then
-		echo -n 'rm old, '
-		rm -rf "$target"
-	fi
-
-	echo -n 'clone, '
-	case "$vcs" in
-		git)
-			git clone --quiet --no-checkout "$url" "$target"
-			( cd "$target" && git checkout --quiet "$rev" && git reset --quiet --hard "$rev" -- )
-			;;
-		hg)
-			hg clone --quiet --updaterev "$rev" "$url" "$target"
-			;;
-	esac
-
-	echo -n 'rm VCS, '
-	( cd "$target" && rm -rf .{git,hg} )
-
-	if $delete_vendor; then
-		echo -n 'rm vendor, '
-		( cd "$target" && rm -rf vendor Godeps/_workspace )
-	fi
-
-	echo done
-}
-
-clean() {
-	# If $GOPATH starts with ./vendor, (go list) shows the short-form import paths for packages inside ./vendor.
-	# So, reset GOPATH to the external value (without ./vendor), so that the grep -v works.
-	local packages=($(GOPATH=$original_GOPATH go list -e ./... | grep -v "^${PROJECT}/vendor"))
-	local platforms=( linux/amd64 linux/386 )
-
-	local buildTags=( )
-
-	echo
-
-	echo -n 'collecting import graph, '
-	local IFS=$'\n'
-	local imports=( $(
-		for platform in "${platforms[@]}"; do
-			export GOOS="${platform%/*}";
-			export GOARCH="${platform##*/}";
-			go list -e -tags "$buildTags" -f '{{join .Deps "\n"}}' "${packages[@]}"
-			go list -e -tags "$buildTags" -f '{{join .TestImports "\n"}}' "${packages[@]}"
-		done | grep -vE "^${PROJECT}" | sort -u
-	) )
-	# .TestImports does not include indirect dependencies, so do one more iteration.
-	imports+=( $(
-		go list -e -f '{{join .Deps "\n"}}' "${imports[@]}" | grep -vE "^${PROJECT}" | sort -u
-	) )
-	imports=( $(go list -e -f '{{if not .Standard}}{{.ImportPath}}{{end}}' "${imports[@]}") )
-	unset IFS
-
-	echo -n 'pruning unused packages, '
-	findArgs=(
-		# This directory contains only .c and .h files which are necessary
-		# -path vendor/src/github.com/mattn/go-sqlite3/code
-	)
-	for import in "${imports[@]}"; do
-		[ "${#findArgs[@]}" -eq 0 ] || findArgs+=( -or )
-		findArgs+=( -path "vendor/src/$import" )
-	done
-	local IFS=$'\n'
-	local prune=( $($find vendor -depth -type d -not '(' "${findArgs[@]}" ')') )
-	unset IFS
-	for dir in "${prune[@]}"; do
-		$find "$dir" -maxdepth 1 -not -type d -not -name 'LICENSE*' -not -name 'COPYING*' -exec rm -v -f '{}' ';'
-		rmdir "$dir" 2>/dev/null || true
-	done
-
-	echo -n 'pruning unused files, '
-	$find vendor -type f -name '*_test.go' -exec rm -v '{}' ';'
-
-	echo done
-}
-
-# Fix up hard-coded imports that refer to Godeps paths so they'll work with our vendoring
-fix_rewritten_imports () {
-	local pkg="$1"
-	local remove="${pkg}/Godeps/_workspace/src/"
-	local target="vendor/src/$pkg"
-
-	echo "$pkg: fixing rewritten imports"
-	$find "$target" -name \*.go -exec sed -i -e "s|\"${remove}|\"|g" {} \;
-}
hack/btrfs_tag.sh (new executable file, +7)
@@ -0,0 +1,7 @@
+#!/bin/bash
+cc -E - > /dev/null 2> /dev/null << EOF
+#include <btrfs/version.h>
+EOF
+if test $? -ne 0 ; then
+	echo btrfs_noversion
+fi
hack/libdm_tag.sh (new executable file, +14)
@@ -0,0 +1,14 @@
+#!/bin/bash
+tmpdir="$PWD/tmp.$RANDOM"
+mkdir -p "$tmpdir"
+trap 'rm -fr "$tmpdir"' EXIT
+cc -c -o "$tmpdir"/libdm_tag.o -x c - > /dev/null 2> /dev/null << EOF
+#include <libdevmapper.h>
+int main() {
+	struct dm_task *task;
+	return 0;
+}
+EOF
+if test $? -ne 0 ; then
+	echo libdm_no_deferred_remove
+fi
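Each of these scripts prints a tag only when the corresponding C development headers are missing, and the Makefile folds their output into BUILDTAGS, which the build and test targets then pass to go via -tags. Go source can gate whole files on those tags; a hedged sketch of the mechanism (the package and function names here are invented for illustration, not taken from the repository):

// +build btrfs_noversion

// Compiled only when hack/btrfs_tag.sh could not find <btrfs/version.h>.
// A sibling file carrying the opposite constraint, "!btrfs_noversion",
// would hold the version-aware implementation.
package drivers

func btrfsLibraryVersion() string { return "unknown" }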
hack/make.sh
@@ -72,10 +72,10 @@ TESTFLAGS+=" -test.timeout=10m"
 go_test_dir() {
 	dir=$1
 	(
-		echo '+ go test' $TESTFLAGS "${SKOPEO_PKG}${dir#.}"
+		echo '+ go test' $TESTFLAGS ${BUILDTAGS:+-tags "$BUILDTAGS"} "${SKOPEO_PKG}${dir#.}"
 		cd "$dir"
 		export DEST="$ABS_DEST" # we're in a subshell, so this is safe -- our integration-cli tests need DEST, and "cd" screws it up
-		go test $TESTFLAGS
+		go test $TESTFLAGS ${BUILDTAGS:+-tags "$BUILDTAGS"}
 	)
 }
hack/make/test-integration
@@ -8,7 +8,7 @@ bundle_test_integration() {
 
 # subshell so that we can export PATH without breaking other things
 (
-	make binary-local
+	make binary-local ${BUILDTAGS:+BUILDTAGS="$BUILDTAGS"}
 	make install
 	export GO15VENDOREXPERIMENT=1
 	bundle_test_integration
hack/vendor.sh (49 lines changed; executable file → normal file)
@@ -1,40 +1,15 @@
-#!/usr/bin/env bash
+#!/bin/bash
+
+# This file is just wrapper around vndr (github.com/LK4D4/vndr) tool.
+# For updating dependencies you should change `vendor.conf` file in root of the
+# project. Please refer to https://github.com/LK4D4/vndr/blob/master/README.md for
+# vndr usage.
 
 set -e
 
 cd "$(dirname "$BASH_SOURCE")/.."
 rm -rf vendor/
-source 'hack/.vendor-helpers.sh'
+if ! hash vndr; then
+	echo "Please install vndr with \"go get github.com/LK4D4/vndr\" and put it in your \$GOPATH"
+	exit 1
+fi
 
-clone git github.com/urfave/cli v1.17.0
-clone git github.com/containers/image master
-clone git gopkg.in/cheggaaa/pb.v1 ad4efe000aa550bb54918c06ebbadc0ff17687b9 https://github.com/cheggaaa/pb
-clone git github.com/Sirupsen/logrus v0.10.0
-clone git github.com/go-check/check v1
-clone git github.com/stretchr/testify v1.1.3
-clone git github.com/davecgh/go-spew master
-clone git github.com/pmezard/go-difflib master
-# docker deps from https://github.com/docker/docker/blob/v1.11.2/hack/vendor.sh
-clone git github.com/docker/docker v1.12.1
-clone git github.com/docker/engine-api 4eca04ae18f4f93f40196a17b9aa6e11262a7269
-clone git github.com/docker/go-connections 4ccf312bf1d35e5dbda654e57a9be4c3f3cd0366
-clone git github.com/vbatts/tar-split v0.9.11
-clone git github.com/gorilla/context 14f550f51a
-clone git github.com/gorilla/mux e444e69cbd
-clone git github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3
-clone git golang.org/x/net master https://github.com/golang/net.git
-# end docker deps
-clone git github.com/docker/distribution 07f32ac1831ed0fc71960b7da5d6bb83cb6881b5
-clone git github.com/docker/libtrust master
-clone git github.com/opencontainers/runc master
-clone git github.com/opencontainers/image-spec 7dc1ee39c59c6949612c6fdf502a4727750cb063
-clone git github.com/mtrmac/gpgme master
-# openshift/origin' k8s dependencies as of OpenShift v1.1.5
-clone git github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
-clone git k8s.io/kubernetes 4a3f9c5b19c7ff804cbc1bf37a15c044ca5d2353 https://github.com/openshift/kubernetes
-clone git github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee
-clone git gopkg.in/yaml.v2 d466437aa4adc35830964cffc5b5f262c63ddcb4
-clone git github.com/imdario/mergo 6633656539c1639d9d78127b7d47c622b5d7b6dc
-
-clean
-
-mv vendor/src/* vendor/
+vndr "$@"
integration/check_test.go
@@ -85,6 +85,13 @@ func (s *SkopeoSuite) TestNeedAuthToPrivateRegistryV2WithoutDockerCfg(c *check.C) {
 	assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
 }
 
+func (s *SkopeoSuite) TestCertDirInsteadOfCertPath(c *check.C) {
+	wanted := ".*flag provided but not defined: -cert-path.*"
+	assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "--cert-path=/")
+	wanted = ".*unauthorized: authentication required.*"
+	assertSkopeoFails(c, wanted, "--tls-verify=false", "inspect", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), "--cert-dir=/etc/docker/certs.d/")
+}
+
 // TODO(runcom): as soon as we can push to registries ensure you can inspect here
 // not just get image not found :)
 func (s *SkopeoSuite) TestNoNeedAuthToPrivateRegistryV2ImageNotFound(c *check.C) {
integration/copy_test.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"fmt"
 	"io/ioutil"
+	"log"
 	"net/http"
 	"net/http/httptest"
 	"os"
@@ -11,8 +12,10 @@ import (
 	"strings"
 
 	"github.com/containers/image/manifest"
-	"github.com/docker/distribution/digest"
+	"github.com/containers/image/signature"
 	"github.com/go-check/check"
+	"github.com/opencontainers/go-digest"
+	"github.com/opencontainers/image-tools/image"
 )
 
 func init() {
@@ -116,8 +119,7 @@ func (s *CopySuite) TestCopySimpleAtomicRegistry(c *check.C) {
 	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "dir:"+dir1, "atomic:localhost:5000/myns/unsigned:unsigned")
 	// The result of pushing and pulling is an unmodified image.
 	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/unsigned:unsigned", "dir:"+dir2)
-	out := combinedOutputOfCommand(c, "diff", "-urN", dir1, dir2)
-	c.Assert(out, check.Equals, "")
+	destructiveCheckDirImagesAreEqual(c, dir1, dir2)
 }
 
 // The most basic (skopeo copy) use:
@@ -151,22 +153,10 @@ func (s *CopySuite) TestCopySimple(c *check.C) {
 	c.Assert(err, check.IsNil)
 }
 
-// Streaming (skopeo copy)
-func (s *CopySuite) TestCopyStreaming(c *check.C) {
-	dir1, err := ioutil.TempDir("", "streaming-1")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir1)
-	dir2, err := ioutil.TempDir("", "streaming-2")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(dir2)
-
-	// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
-	// streaming: docker: → atomic:
-	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "docker://estesp/busybox:amd64", "atomic:localhost:5000/myns/unsigned:streaming")
-	// Compare (copies of) the original and the copy:
-	assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:amd64", "dir:"+dir1)
-	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/unsigned:streaming", "dir:"+dir2)
-	// The manifests will have different JWS signatures; so, compare the manifests by digests, which
+// Check whether dir: images in dir1 and dir2 are equal.
+// WARNING: This modifies the contents of dir1 and dir2!
+func destructiveCheckDirImagesAreEqual(c *check.C, dir1, dir2 string) {
+	// The manifests may have different JWS signatures; so, compare the manifests by digests, which
 	// strips the signatures, and remove them, comparing the rest file by file.
 	digests := []digest.Digest{}
 	for _, dir := range []string{dir1, dir2} {
@@ -183,11 +173,83 @@
 	c.Assert(digests[0], check.Equals, digests[1])
 	out := combinedOutputOfCommand(c, "diff", "-urN", dir1, dir2)
 	c.Assert(out, check.Equals, "")
 }
 
+// Streaming (skopeo copy)
+func (s *CopySuite) TestCopyStreaming(c *check.C) {
+	dir1, err := ioutil.TempDir("", "streaming-1")
+	c.Assert(err, check.IsNil)
+	defer os.RemoveAll(dir1)
+	dir2, err := ioutil.TempDir("", "streaming-2")
+	c.Assert(err, check.IsNil)
+	defer os.RemoveAll(dir2)
+
+	// FIXME: It would be nice to use one of the local Docker registries instead of neeeding an Internet connection.
+	// streaming: docker: → atomic:
+	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "docker://estesp/busybox:amd64", "atomic:localhost:5000/myns/unsigned:streaming")
+	// Compare (copies of) the original and the copy:
+	assertSkopeoSucceeds(c, "", "copy", "docker://estesp/busybox:amd64", "dir:"+dir1)
+	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/unsigned:streaming", "dir:"+dir2)
+	destructiveCheckDirImagesAreEqual(c, dir1, dir2)
+	// FIXME: Also check pushing to docker://
+}
+
+// OCI round-trip testing. It's very important to make sure that OCI <-> Docker
+// conversion works (while skopeo handles many things, one of the most obvious
+// benefits of a tool like skopeo is that you can use OCI tooling to create an
+// image and then as the final step convert the image to a non-standard format
+// like Docker). But this only works if we _test_ it.
+func (s *CopySuite) TestCopyOCIRoundTrip(c *check.C) {
+	const ourRegistry = "docker://" + v2DockerRegistryURL + "/"
+
+	oci1, err := ioutil.TempDir("", "oci-1")
+	c.Assert(err, check.IsNil)
+	defer os.RemoveAll(oci1)
+	oci2, err := ioutil.TempDir("", "oci-2")
+	c.Assert(err, check.IsNil)
+	defer os.RemoveAll(oci2)
+
+	// Docker -> OCI
+	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "docker://busybox", "oci:"+oci1+":latest")
+	// OCI -> Docker
+	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "oci:"+oci1+":latest", ourRegistry+"original/busybox:oci_copy")
+	// Docker -> OCI
+	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", ourRegistry+"original/busybox:oci_copy", "oci:"+oci2+":latest")
+	// OCI -> Docker
+	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--debug", "copy", "oci:"+oci2+":latest", ourRegistry+"original/busybox:oci_copy2")
+
+	// TODO: Add some more tags to output to and check those work properly.
+
+	// First, make sure the OCI blobs are the same. This should _always_ be true.
+	out := combinedOutputOfCommand(c, "diff", "-urN", oci1+"/blobs", oci2+"/blobs")
+	c.Assert(out, check.Equals, "")
+
+	// For some silly reason we pass a logger to the OCI library here...
+	logger := log.New(os.Stderr, "", 0)
+
+	// TODO: Verify using the upstream OCI image validator.
+	err = image.ValidateLayout(oci1, nil, logger)
+	c.Assert(err, check.IsNil)
+	err = image.ValidateLayout(oci2, nil, logger)
+	c.Assert(err, check.IsNil)
+
+	// Now verify that everything is identical. Currently this is true, but
+	// because we recompute the manifests on-the-fly this doesn't necessarily
+	// always have to be true (but if this breaks in the future __PLEASE__ make
+	// sure that the breakage actually makes sense before removing this check).
+	out = combinedOutputOfCommand(c, "diff", "-urN", oci1, oci2)
+	c.Assert(out, check.Equals, "")
+}
+
 // --sign-by and --policy copy, primarily using atomic:
 func (s *CopySuite) TestCopySignatures(c *check.C) {
+	mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
+	c.Assert(err, check.IsNil)
+	defer mech.Close()
+	if err := mech.SupportsSigning(); err != nil { // FIXME? Test that verification and policy enforcement works, using signatures from fixtures
+		c.Skip(fmt.Sprintf("Signing not supported: %v", err))
+	}
+
 	dir, err := ioutil.TempDir("", "signatures-dest")
 	c.Assert(err, check.IsNil)
 	defer os.RemoveAll(dir)
@@ -237,6 +299,13 @@ func (s *CopySuite) TestCopySignatures(c *check.C) {
 
 // --policy copy for dir: sources
 func (s *CopySuite) TestCopyDirSignatures(c *check.C) {
+	mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
+	c.Assert(err, check.IsNil)
+	defer mech.Close()
+	if err := mech.SupportsSigning(); err != nil { // FIXME? Test that verification and policy enforcement works, using signatures from fixtures
+		c.Skip(fmt.Sprintf("Signing not supported: %v", err))
+	}
+
 	topDir, err := ioutil.TempDir("", "dir-signatures-top")
 	c.Assert(err, check.IsNil)
 	defer os.RemoveAll(topDir)
@@ -336,11 +405,18 @@ func findRegularFiles(c *check.C, root string) []string {
 
 // --sign-by and policy use for docker: with sigstore
 func (s *CopySuite) TestCopyDockerSigstore(c *check.C) {
+	mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
+	c.Assert(err, check.IsNil)
+	defer mech.Close()
+	if err := mech.SupportsSigning(); err != nil { // FIXME? Test that verification and policy enforcement works, using signatures from fixtures
+		c.Skip(fmt.Sprintf("Signing not supported: %v", err))
+	}
+
 	const ourRegistry = "docker://" + v2DockerRegistryURL + "/"
 
 	tmpDir, err := ioutil.TempDir("", "signatures-sigstore")
 	c.Assert(err, check.IsNil)
-	//defer os.RemoveAll(tmpDir)
+	defer os.RemoveAll(tmpDir)
 	copyDest := filepath.Join(tmpDir, "dest")
 	err = os.Mkdir(copyDest, 0755)
 	c.Assert(err, check.IsNil)
@@ -381,7 +457,6 @@ func (s *CopySuite) TestCopyDockerSigstore(c *check.C) {
 	// Deleting the image succeeds,
 	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "delete", ourRegistry+"signed/busybox")
 	// and the signature file has been deleted (but we leave the directories around).
-	// a signature file has been created,
 	foundFiles = findRegularFiles(c, plainSigstore)
 	c.Assert(foundFiles, check.HasLen, 0)
@@ -398,6 +473,64 @@ func (s *CopySuite) TestCopyDockerSigstore(c *check.C) {
 	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir, "copy", ourRegistry+"public/busybox", dirDest)
 }
 
+// atomic: and docker: X-Registry-Supports-Signatures works and interoperates
+func (s *CopySuite) TestCopyAtomicExtension(c *check.C) {
+	mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
+	c.Assert(err, check.IsNil)
+	defer mech.Close()
+	if err := mech.SupportsSigning(); err != nil { // FIXME? Test that the reading/writing works using signatures from fixtures
+		c.Skip(fmt.Sprintf("Signing not supported: %v", err))
+	}
+
+	topDir, err := ioutil.TempDir("", "atomic-extension")
+	c.Assert(err, check.IsNil)
+	defer os.RemoveAll(topDir)
+	for _, subdir := range []string{"dirAA", "dirAD", "dirDA", "dirDD", "registries.d"} {
+		err := os.MkdirAll(filepath.Join(topDir, subdir), 0755)
+		c.Assert(err, check.IsNil)
+	}
+	registriesDir := filepath.Join(topDir, "registries.d")
+	dirDest := "dir:" + topDir
+	policy := fileFromFixture(c, "fixtures/policy.json", map[string]string{"@keydir@": s.gpgHome})
+	defer os.Remove(policy)
+
+	// Get an image to work with to an atomic: destination. Also verifies that we can use Docker repositories without X-Registry-Supports-Signatures
+	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir, "copy", "docker://busybox", "atomic:localhost:5000/myns/extension:unsigned")
+	// Pulling an unsigned image using atomic: fails.
+	assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
+		"--tls-verify=false", "--policy", policy,
+		"copy", "atomic:localhost:5000/myns/extension:unsigned", dirDest+"/dirAA")
+	// The same when pulling using docker:
+	assertSkopeoFails(c, ".*Source image rejected: A signature was required, but no signature exists.*",
+		"--tls-verify=false", "--policy", policy, "--registries.d", registriesDir,
+		"copy", "docker://localhost:5000/myns/extension:unsigned", dirDest+"/dirAD")
+
+	// Sign the image using atomic:
+	assertSkopeoSucceeds(c, "", "--tls-verify=false",
+		"copy", "--sign-by", "personal@example.com", "atomic:localhost:5000/myns/extension:unsigned", "atomic:localhost:5000/myns/extension:atomic")
+	// Pulling the image using atomic: now succeeds.
+	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy,
+		"copy", "atomic:localhost:5000/myns/extension:atomic", dirDest+"/dirAA")
+	// The same when pulling using docker:
+	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir,
+		"copy", "docker://localhost:5000/myns/extension:atomic", dirDest+"/dirAD")
+	// Both access methods result in the same data.
+	destructiveCheckDirImagesAreEqual(c, filepath.Join(topDir, "dirAA"), filepath.Join(topDir, "dirAD"))
+
+	// Get another image (different so that they don't share signatures, and sign it using docker://)
+	assertSkopeoSucceeds(c, "", "--tls-verify=false", "--registries.d", registriesDir,
+		"copy", "--sign-by", "personal@example.com", "docker://estesp/busybox:ppc64le", "atomic:localhost:5000/myns/extension:extension")
+	c.Logf("%s", combinedOutputOfCommand(c, "oc", "get", "istag", "extension:extension", "-o", "json"))
+	// Pulling the image using atomic: succeeds.
+	assertSkopeoSucceeds(c, "", "--debug", "--tls-verify=false", "--policy", policy,
+		"copy", "atomic:localhost:5000/myns/extension:extension", dirDest+"/dirDA")
+	// The same when pulling using docker:
+	assertSkopeoSucceeds(c, "", "--debug", "--tls-verify=false", "--policy", policy, "--registries.d", registriesDir,
+		"copy", "docker://localhost:5000/myns/extension:extension", dirDest+"/dirDD")
+	// Both access methods result in the same data.
+	destructiveCheckDirImagesAreEqual(c, filepath.Join(topDir, "dirDA"), filepath.Join(topDir, "dirDD"))
+}
+
 func (s *SkopeoSuite) TestCopySrcWithAuth(c *check.C) {
 	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", "docker://busybox", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
 	dir1, err := ioutil.TempDir("", "copy-1")
@@ -414,3 +547,12 @@ func (s *SkopeoSuite) TestCopySrcAndDestWithAuth(c *check.C) {
 	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--dest-creds=testuser:testpassword", "docker://busybox", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url))
 	assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "--src-creds=testuser:testpassword", "--dest-creds=testuser:testpassword", fmt.Sprintf("docker://%s/busybox:latest", s.regV2WithAuth.url), fmt.Sprintf("docker://%s/test:auth", s.regV2WithAuth.url))
 }
+
+func (s *CopySuite) TestCopyNoPanicOnHTTPResponseWOTLSVerifyFalse(c *check.C) {
+	const ourRegistry = "docker://" + v2DockerRegistryURL + "/"
+
+	// dir:test isn't created beforehand just because we already know this could
+	// just fail when evaluating the src
+	assertSkopeoFails(c, ".*server gave HTTP response to HTTPS client.*",
+		"copy", ourRegistry+"foobar", "dir:test")
+}
integration/fixtures/policy.json
@@ -13,6 +13,13 @@
 			"keyPath": "@keydir@/personal-pubkey.gpg"
 		}
 	],
+	"localhost:5000/myns/extension": [
+		{
+			"type": "signedBy",
+			"keyType": "GPGKeys",
+			"keyPath": "@keydir@/personal-pubkey.gpg"
+		}
+	],
 	"docker.io/openshift": [
 		{
 			"type": "insecureAcceptAnything"
@@ -85,6 +92,13 @@
 			"keyType": "GPGKeys",
 			"keyPath": "@keydir@/personal-pubkey.gpg"
 		}
+	],
+	"localhost:5000/myns/extension": [
+		{
+			"type": "signedBy",
+			"keyType": "GPGKeys",
+			"keyPath": "@keydir@/personal-pubkey.gpg"
+		}
 	]
 }
 }
@@ -14,6 +14,10 @@ import (
|
||||
"github.com/go-check/check"
|
||||
)
|
||||
|
||||
var adminKUBECONFIG = map[string]string{
|
||||
"KUBECONFIG": "openshift.local.config/master/admin.kubeconfig",
|
||||
}
|
||||
|
||||
// openshiftCluster is an OpenShift API master and integrated registry
|
||||
// running on localhost.
|
||||
type openshiftCluster struct {
|
||||
@@ -42,10 +46,20 @@ func startOpenshiftCluster(c *check.C) *openshiftCluster {
|
||||
return cluster
|
||||
}
|
||||
|
||||
// clusterCmd creates an exec.Cmd in c.workingDir with current environment modified by environment
|
||||
func (c *openshiftCluster) clusterCmd(env map[string]string, name string, args ...string) *exec.Cmd {
|
||||
cmd := exec.Command(name, args...)
|
||||
cmd.Dir = c.workingDir
|
||||
cmd.Env = os.Environ()
|
||||
for key, value := range env {
|
||||
cmd.Env = modifyEnviron(cmd.Env, key, value)
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
// startMaster starts the OpenShift master (etcd+API server) and waits for it to be ready, or terminates on failure.
|
||||
func (c *openshiftCluster) startMaster() {
|
||||
c.master = exec.Command("openshift", "start", "master")
|
||||
c.master.Dir = c.workingDir
|
||||
c.master = c.clusterCmd(nil, "openshift", "start", "master")
|
||||
stdout, err := c.master.StdoutPipe()
|
||||
// Send both to the same pipe. This might cause the two streams to be mixed up,
|
||||
// but logging actually goes only to stderr - this primarily ensure we log any
|
||||
@@ -112,14 +126,35 @@ func (c *openshiftCluster) startMaster() {
|
||||
|
||||
// startRegistry starts the OpenShift registry and waits for it to be ready, or terminates on failure.
|
||||
func (c *openshiftCluster) startRegistry() {
	//KUBECONFIG=openshift.local.config/master/openshift-registry.kubeconfig DOCKER_REGISTRY_URL=127.0.0.1:5000
	c.registry = exec.Command("dockerregistry", "/atomic-registry-config.yml")
	c.registry.Dir = c.workingDir
	c.registry.Env = os.Environ()
	c.registry.Env = modifyEnviron(c.registry.Env, "KUBECONFIG", "openshift.local.config/master/openshift-registry.kubeconfig")
	c.registry.Env = modifyEnviron(c.registry.Env, "DOCKER_REGISTRY_URL", "127.0.0.1:5000")
	// This partially mimics the objects created by (oadm registry), except that we run the
	// server directly as an ordinary process instead of a pod with an implicitly attached service account.
	saJSON := `{
		"apiVersion": "v1",
		"kind": "ServiceAccount",
		"metadata": {
			"name": "registry"
		}
	}`
	cmd := c.clusterCmd(adminKUBECONFIG, "oc", "create", "-f", "-")
	runExecCmdWithInput(c.c, cmd, saJSON)

	cmd = c.clusterCmd(adminKUBECONFIG, "oadm", "policy", "add-cluster-role-to-user", "system:registry", "-z", "registry")
	out, err := cmd.CombinedOutput()
	c.c.Assert(err, check.IsNil, check.Commentf("%s", string(out)))
	c.c.Assert(string(out), check.Equals, "cluster role \"system:registry\" added: \"registry\"\n")

	cmd = c.clusterCmd(adminKUBECONFIG, "oadm", "create-api-client-config", "--client-dir=openshift.local.registry", "--basename=openshift-registry", "--user=system:serviceaccount:default:registry")
	out, err = cmd.CombinedOutput()
	c.c.Assert(err, check.IsNil, check.Commentf("%s", string(out)))
	c.c.Assert(string(out), check.Equals, "")

	//KUBECONFIG=openshift.local.registry/openshift-registry.kubeconfig DOCKER_REGISTRY_URL=127.0.0.1:5000
	c.registry = c.clusterCmd(map[string]string{
		"KUBECONFIG":          "openshift.local.registry/openshift-registry.kubeconfig",
		"DOCKER_REGISTRY_URL": "127.0.0.1:5000",
	}, "dockerregistry", "/atomic-registry-config.yml")
	consumeAndLogOutputs(c.c, "registry", c.registry)
	err := c.registry.Start()
	err = c.registry.Start()
	c.c.Assert(err, check.IsNil)

	portOpen, terminatePortCheck := newPortChecker(c.c, 5000)
@@ -134,8 +169,7 @@ func (c *openshiftCluster) startRegistry() {
// ocLogin runs (oc login) and (oc new-project) on the cluster, or terminates on failure.
func (c *openshiftCluster) ocLoginToProject() {
	c.c.Logf("oc login")
	cmd := exec.Command("oc", "login", "--certificate-authority=openshift.local.config/master/ca.crt", "-u", "myuser", "-p", "mypw", "https://localhost:8443")
	cmd.Dir = c.workingDir
	cmd := c.clusterCmd(nil, "oc", "login", "--certificate-authority=openshift.local.config/master/ca.crt", "-u", "myuser", "-p", "mypw", "https://localhost:8443")
	out, err := cmd.CombinedOutput()
	c.c.Assert(err, check.IsNil, check.Commentf("%s", out))
	c.c.Assert(string(out), check.Matches, "(?s).*Login successful.*") // (?s) : '.' will also match newlines
@@ -170,13 +204,10 @@ func (c *openshiftCluster) dockerLogin() {
// FIXME: This also allows anyone to DoS anyone else; this design is really not all
// that workable, but it is the best we can do for now.
func (c *openshiftCluster) relaxImageSignerPermissions() {
	cmd := exec.Command("oadm", "policy", "add-cluster-role-to-group", "system:image-signer", "system:authenticated")
	cmd.Dir = c.workingDir
	cmd.Env = os.Environ()
	cmd.Env = modifyEnviron(cmd.Env, "KUBECONFIG", "openshift.local.config/master/admin.kubeconfig")
	cmd := c.clusterCmd(adminKUBECONFIG, "oadm", "policy", "add-cluster-role-to-group", "system:image-signer", "system:authenticated")
	out, err := cmd.CombinedOutput()
	c.c.Assert(err, check.IsNil, check.Commentf("%s", string(out)))
	c.c.Assert(string(out), check.Equals, "")
	c.c.Assert(string(out), check.Equals, "cluster role \"system:image-signer\" added: \"system:authenticated\"\n")
}

// tearDown stops the cluster services and deletes (only some!) of the state.
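The environment handling above goes through a small modifyEnviron helper whose definition sits outside this excerpt. Purely to illustrate the contract the calls above rely on, a minimal sketch (hypothetical, not the repository's actual implementation):

```go
// modifyEnviron returns env (a list of "KEY=value" entries, as produced by
// os.Environ()) with name set to value, replacing any existing entry.
// Hypothetical sketch; the real helper is defined elsewhere in the
// integration package. Requires the "strings" import.
func modifyEnviron(env []string, name, value string) []string {
	prefix := name + "="
	res := []string{}
	for _, e := range env {
		if !strings.HasPrefix(e, prefix) {
			res = append(res, e)
		}
	}
	return append(res, prefix+value)
}
```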
integration/openshift_shell_test.go | 40 (new file)
@@ -0,0 +1,40 @@
// +build openshift_shell

package main

import (
	"os"
	"os/exec"

	"github.com/go-check/check"
)

/*
TestRunShell is not really a test; it is a convenient way to use the registry setup code
in openshift.go and CopySuite to get an interactive environment for experimentation.

To use it, run:
	sudo make shell
to start a container, then within the container:
	SKOPEO_CONTAINER_TESTS=1 PS1='nested> ' go test -tags openshift_shell -timeout=24h ./integration -v -check.v -check.vv -check.f='CopySuite.TestRunShell'

An example of what can be done within the container:
	cd ..; make binary-local install
	./skopeo --tls-verify=false copy --sign-by=personal@example.com docker://busybox:latest atomic:localhost:5000/myns/personal:personal
	oc get istag personal:personal -o json
	curl -L -v 'http://localhost:5000/v2/'
	cat ~/.docker/config.json
	curl -L -v 'http://localhost:5000/openshift/token&scope=repository:myns/personal:pull' --header 'Authorization: Basic $auth_from_docker'
	curl -L -v 'http://localhost:5000/v2/myns/personal/manifests/personal' --header 'Authorization: Bearer $token_from_oauth'
	curl -L -v 'http://localhost:5000/extensions/v2/myns/personal/signatures/$manifest_digest' --header 'Authorization: Bearer $token_from_oauth'
*/
func (s *CopySuite) TestRunShell(c *check.C) {
	cmd := exec.Command("bash", "-i")
	tty, err := os.OpenFile("/dev/tty", os.O_RDWR, 0)
	c.Assert(err, check.IsNil)
	cmd.Stdin = tty
	cmd.Stdout = tty
	cmd.Stderr = tty
	err = cmd.Run()
	c.Assert(err, check.IsNil)
}
@@ -8,6 +8,7 @@ import (
	"os/exec"
	"strings"

	"github.com/containers/image/signature"
	"github.com/go-check/check"
)

@@ -36,7 +37,14 @@ func findFingerprint(lineBytes []byte) (string, error) {
}

func (s *SigningSuite) SetUpTest(c *check.C) {
	_, err := exec.LookPath(skopeoBinary)
	mech, _, err := signature.NewEphemeralGPGSigningMechanism([]byte{})
	c.Assert(err, check.IsNil)
	defer mech.Close()
	if err := mech.SupportsSigning(); err != nil { // FIXME? Test that verification and policy enforcement works, using signatures from fixtures
		c.Skip(fmt.Sprintf("Signing not supported: %v", err))
	}

	_, err = exec.LookPath(skopeoBinary)
	c.Assert(err, check.IsNil)

	s.gpgHome, err = ioutil.TempDir("", "skopeo-gpg")

@@ -74,9 +74,15 @@ func assertSkopeoFails(c *check.C, regexp string, args ...string) {
// runCommandWithInput runs a command as if exec.Command(), sending it the input to stdin,
// and verifies that the exit status is 0, or terminates c on failure.
func runCommandWithInput(c *check.C, input string, name string, args ...string) {
	c.Logf("Running %s %s", name, strings.Join(args, " "))
	cmd := exec.Command(name, args...)
	consumeAndLogOutputs(c, name+" "+strings.Join(args, " "), cmd)
	runExecCmdWithInput(c, cmd, input)
}

// runExecCmdWithInput runs an exec.Cmd, sending it the input to stdin,
// and verifies that the exit status is 0, or terminates c on failure.
func runExecCmdWithInput(c *check.C, cmd *exec.Cmd, input string) {
	c.Logf("Running %s %s", cmd.Path, strings.Join(cmd.Args, " "))
	consumeAndLogOutputs(c, cmd.Path+" "+strings.Join(cmd.Args, " "), cmd)
	stdin, err := cmd.StdinPipe()
	c.Assert(err, check.IsNil)
	err = cmd.Start()
vendor.conf | 45 (new file)
@@ -0,0 +1,45 @@
github.com/urfave/cli v1.17.0
github.com/containers/image master
github.com/opencontainers/go-digest master
gopkg.in/cheggaaa/pb.v1 ad4efe000aa550bb54918c06ebbadc0ff17687b9 https://github.com/cheggaaa/pb
github.com/containers/storage master
github.com/Sirupsen/logrus v0.10.0
github.com/go-check/check v1
github.com/stretchr/testify v1.1.3
github.com/davecgh/go-spew master
github.com/pmezard/go-difflib master
github.com/pkg/errors master
golang.org/x/crypto master
# docker deps from https://github.com/docker/docker/blob/v1.11.2/hack/vendor.sh
github.com/docker/docker v1.13.0
github.com/docker/go-connections 4ccf312bf1d35e5dbda654e57a9be4c3f3cd0366
github.com/vbatts/tar-split v0.10.1
github.com/gorilla/context 14f550f51a
github.com/gorilla/mux e444e69cbd
github.com/docker/go-units 8a7beacffa3009a9ac66bad506b18ffdd110cf97
golang.org/x/net master
# end docker deps
golang.org/x/text master
github.com/docker/distribution master
github.com/docker/libtrust master
github.com/opencontainers/runc master
github.com/opencontainers/image-spec v1.0.0-rc4
# -- start OCI image validation requirements.
github.com/opencontainers/runtime-spec v1.0.0-rc4
github.com/opencontainers/image-tools v0.1.0
github.com/xeipuuv/gojsonschema master
github.com/xeipuuv/gojsonreference master
github.com/xeipuuv/gojsonpointer master
go4.org master https://github.com/camlistore/go4
# -- end OCI image validation requirements
github.com/mtrmac/gpgme master
# openshift/origin's k8s dependencies as of OpenShift v1.1.5
github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
k8s.io/client-go master
github.com/ghodss/yaml 73d445a93680fa1a78ae23a5839bad48f32ba1ee
gopkg.in/yaml.v2 d466437aa4adc35830964cffc5b5f262c63ddcb4
github.com/imdario/mergo 6633656539c1639d9d78127b7d47c622b5d7b6dc
# containers/storage's dependencies that aren't already being pulled in
github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa
github.com/pborman/uuid v1.0
github.com/opencontainers/selinux master
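For readers new to this file format: each line appears to follow the vndr vendoring-tool convention of import path, pinned revision (tag, branch, or commit SHA), and an optional alternate repository URL. The tool attribution is an assumption; the line shapes themselves are taken from the file above:

```
# <import path> <revision: tag | branch | commit SHA> [<alternate repo URL>]
github.com/Sirupsen/logrus v0.10.0
github.com/containers/image master
gopkg.in/cheggaaa/pb.v1 ad4efe000aa550bb54918c06ebbadc0ff17687b9 https://github.com/cheggaaa/pb
```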
vendor/github.com/Sirupsen/logrus/.gitignore | 1 (generated, vendored)
@@ -1 +0,0 @@
logrus
vendor/github.com/Sirupsen/logrus/.travis.yml | 9 (generated, vendored)
@@ -1,9 +0,0 @@
language: go
go:
  - 1.3
  - 1.4
  - 1.5
  - tip
install:
  - go get -t ./...
script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./...
vendor/github.com/Sirupsen/logrus/CHANGELOG.md | 66 (generated, vendored)
@@ -1,66 +0,0 @@
# 0.10.0

* feature: Add a test hook (#180)
* feature: `ParseLevel` is now case-insensitive (#326)
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
* performance: avoid re-allocations on `WithFields` (#335)

# 0.9.0

* logrus/text_formatter: don't emit empty msg
* logrus/hooks/airbrake: move out of main repository
* logrus/hooks/sentry: move out of main repository
* logrus/hooks/papertrail: move out of main repository
* logrus/hooks/bugsnag: move out of main repository
* logrus/core: run tests with `-race`
* logrus/core: detect TTY based on `stderr`
* logrus/core: support `WithError` on logger
* logrus/core: Solaris support

# 0.8.7

* logrus/core: fix possible race (#216)
* logrus/doc: small typo fixes and doc improvements

# 0.8.6

* hooks/raven: allow passing an initialized client

# 0.8.5

* logrus/core: revert #208

# 0.8.4

* formatter/text: fix data race (#218)

# 0.8.3

* logrus/core: fix entry log level (#208)
* logrus/core: improve performance of text formatter by 40%
* logrus/core: expose `LevelHooks` type
* logrus/core: add support for DragonflyBSD and NetBSD
* formatter/text: print structs more verbosely

# 0.8.2

* logrus: fix more Fatal family functions

# 0.8.1

* logrus: fix not exiting on `Fatalf` and `Fatalln`

# 0.8.0

* logrus: defaults to stderr instead of stdout
* hooks/sentry: add special field for `*http.Request`
* formatter/text: ignore Windows for colors

# 0.7.3

* formatter/\*: allow configuration of timestamp layout

# 0.7.2

* formatter/text: Add configuration option for time format (#158)
vendor/github.com/containers/image/README.md | 52 (generated, vendored, new file)
@@ -0,0 +1,52 @@
[GoDoc](https://godoc.org/github.com/containers/image) [Build Status](https://travis-ci.org/containers/image)
=

`image` is a set of Go libraries aimed at working in various ways with
containers' images and container image registries.

The containers/image library allows applications to pull and push images from
container image registries, like the upstream docker registry. It also
implements "simple image signing".

The containers/image library also allows you to inspect a repository on a
container registry without pulling down the image. This means it fetches the
repository's manifest and it is able to show you a `docker inspect`-like json
output about a whole repository or a tag. This library, in contrast to `docker
inspect`, helps you gather useful information about a repository or a tag
without requiring you to run `docker pull`.

The containers/image library also allows you to translate from one image format
to another, for example docker container images to OCI images. It also allows
you to copy container images between various registries, possibly converting
them as necessary, and to sign and verify images.

The [skopeo](https://github.com/projectatomic/skopeo) tool uses the
containers/image library and takes advantage of its many features.
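Since this README lands alongside the vendored copy package below, a minimal sketch of what calling the copy API looks like may help. The policy setup and the transports.ParseImageName usage are assumptions based on the library's public API of this era, not code taken from this diff:

```go
package main

import (
	"os"

	"github.com/containers/image/copy"
	"github.com/containers/image/signature"
	"github.com/containers/image/transports"
)

func main() {
	// Insecure "accept anything" policy, for illustration only; real callers
	// should load the system policy instead.
	policy := &signature.Policy{Default: []signature.PolicyRequirement{
		signature.NewPRInsecureAcceptAnything(),
	}}
	policyContext, err := signature.NewPolicyContext(policy)
	if err != nil {
		panic(err)
	}
	defer policyContext.Destroy()

	srcRef, err := transports.ParseImageName("docker://busybox:latest")
	if err != nil {
		panic(err)
	}
	destRef, err := transports.ParseImageName("dir:/tmp/busybox")
	if err != nil {
		panic(err)
	}

	// Matches the copy.Image signature shown in the vendored copy/copy.go below.
	if err := copy.Image(policyContext, destRef, srcRef, &copy.Options{
		ReportWriter: os.Stdout,
	}); err != nil {
		panic(err)
	}
}
```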
## Dependencies

Dependencies that this library prefers will not be found in the `vendor`
directory. This is so you can make well-informed decisions about which
libraries you should use with this package in your own projects.

What this project tests against dependencies-wise is located
[here](https://github.com/containers/image/blob/master/vendor.conf).

## Building

For ordinary use, `go build ./...` is sufficient.

When developing this library, please use `make` to take advantage of the tests and validation.

Optionally, you can use the `containers_image_openpgp` build tag (using `go build -tags …`, or `make … BUILDTAGS=…`).
This will use a Golang-only OpenPGP implementation for signature verification instead of the default cgo/gpgme-based implementation;
the primary downside is that creating new signatures with the Golang-only implementation is not supported.

## License

ASL 2.0

## Contact

- Mailing list: [containers-dev](https://groups.google.com/forum/?hl=en#!forum/containers-dev)
- IRC: #[container-projects](irc://irc.freenode.net:6667/#container-projects) on freenode.net
vendor/github.com/containers/image/copy/copy.go | 307 (generated, vendored)
@@ -3,21 +3,23 @@ package copy
import (
	"bytes"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"reflect"
	"time"

	pb "gopkg.in/cheggaaa/pb.v1"

	"github.com/Sirupsen/logrus"
	"github.com/containers/image/image"
	"github.com/containers/image/manifest"
	"github.com/containers/image/pkg/compression"
	"github.com/containers/image/signature"
	"github.com/containers/image/transports"
	"github.com/containers/image/types"
	"github.com/docker/distribution/digest"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
@@ -32,19 +34,36 @@ type digestingReader struct {
	validationFailed bool
}

// imageCopier allows us to keep track of diffID values for blobs, and other
// data, that we're copying between images, and cache other information that
// might allow us to take some shortcuts
type imageCopier struct {
	copiedBlobs       map[digest.Digest]digest.Digest
	cachedDiffIDs     map[digest.Digest]digest.Digest
	manifestUpdates   *types.ManifestUpdateOptions
	dest              types.ImageDestination
	src               types.Image
	rawSource         types.ImageSource
	diffIDsAreNeeded  bool
	canModifyManifest bool
	reportWriter      io.Writer
	progressInterval  time.Duration
	progress          chan types.ProgressProperties
}

// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
// and set validationFailed to true if the source stream does not match expectedDigest.
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
	if err := expectedDigest.Validate(); err != nil {
		return nil, fmt.Errorf("Invalid digest specification %s", expectedDigest)
		return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
	}
	digestAlgorithm := expectedDigest.Algorithm()
	if !digestAlgorithm.Available() {
		return nil, fmt.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
		return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
	}
	return &digestingReader{
		source:           source,
		digester:         digestAlgorithm.New(),
		digester:         digestAlgorithm.Digester(),
		expectedDigest:   expectedDigest,
		validationFailed: false,
	}, nil
@@ -57,14 +76,14 @@ func (d *digestingReader) Read(p []byte) (int, error) {
			// Coverage: This should not happen, the hash.Hash interface requires
			// d.digest.Write to never return an error, and the io.Writer interface
			// requires n2 == len(input) if no error is returned.
			return 0, fmt.Errorf("Error updating digest during verification: %d vs. %d, %v", n2, n, err)
			return 0, errors.Wrapf(err, "Error updating digest during verification: %d vs. %d", n2, n)
		}
	}
	if err == io.EOF {
		actualDigest := d.digester.Digest()
		if actualDigest != d.expectedDigest {
			d.validationFailed = true
			return 0, fmt.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
			return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
		}
	}
	return n, err
@@ -77,66 +96,91 @@ type Options struct {
	ReportWriter     io.Writer
	SourceCtx        *types.SystemContext
	DestinationCtx   *types.SystemContext
	ProgressInterval time.Duration                 // time to wait between reports to signal the progress channel
	Progress         chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
}

// Image copies image from srcRef to destRef, using policyContext to validate source image admissibility.
func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) error {
// Image copies image from srcRef to destRef, using policyContext to validate
// source image admissibility.
func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (retErr error) {
	// NOTE this function uses an output parameter for the error return value.
	// Setting this and returning is the ideal way to return an error.
	//
	// the defers in this routine will wrap the error return with its own errors
	// which can be valuable context in the middle of a multi-streamed copy.
	if options == nil {
		options = &Options{}
	}

	reportWriter := ioutil.Discard
	if options != nil && options.ReportWriter != nil {

	if options.ReportWriter != nil {
		reportWriter = options.ReportWriter
	}

	writeReport := func(f string, a ...interface{}) {
		fmt.Fprintf(reportWriter, f, a...)
	}

	dest, err := destRef.NewImageDestination(options.DestinationCtx)
	if err != nil {
		return fmt.Errorf("Error initializing destination %s: %v", transports.ImageName(destRef), err)
		return errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
	}
	defer dest.Close()
	defer func() {
		if err := dest.Close(); err != nil {
			retErr = errors.Wrapf(retErr, " (dest: %v)", err)
		}
	}()

	destSupportedManifestMIMETypes := dest.SupportedManifestMIMETypes()

	rawSource, err := srcRef.NewImageSource(options.SourceCtx, destSupportedManifestMIMETypes)
	if err != nil {
		return fmt.Errorf("Error initializing source %s: %v", transports.ImageName(srcRef), err)
		return errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
	}
	unparsedImage := image.UnparsedFromSource(rawSource)
	defer func() {
		if unparsedImage != nil {
			unparsedImage.Close()
			if err := unparsedImage.Close(); err != nil {
				retErr = errors.Wrapf(retErr, " (unparsed: %v)", err)
			}
		}
	}()

	// Please keep this policy check BEFORE reading any other information about the image.
	if allowed, err := policyContext.IsRunningImageAllowed(unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
		return fmt.Errorf("Source image rejected: %v", err)
		return errors.Wrap(err, "Source image rejected")
	}
	src, err := image.FromUnparsedImage(unparsedImage)
	if err != nil {
		return fmt.Errorf("Error initializing image from source %s: %v", transports.ImageName(srcRef), err)
		return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(srcRef))
	}
	unparsedImage = nil
	defer src.Close()
	defer func() {
		if err := src.Close(); err != nil {
			retErr = errors.Wrapf(retErr, " (source: %v)", err)
		}
	}()

	if src.IsMultiImage() {
		return fmt.Errorf("can not copy %s: manifest contains multiple images", transports.ImageName(srcRef))
		return errors.Errorf("can not copy %s: manifest contains multiple images", transports.ImageName(srcRef))
	}

	var sigs [][]byte
	if options != nil && options.RemoveSignatures {
	if options.RemoveSignatures {
		sigs = [][]byte{}
	} else {
		writeReport("Getting image source signatures\n")
		s, err := src.Signatures()
		if err != nil {
			return fmt.Errorf("Error reading signatures: %v", err)
			return errors.Wrap(err, "Error reading signatures")
		}
		sigs = s
	}
	if len(sigs) != 0 {
		writeReport("Checking if image destination supports signatures\n")
		if err := dest.SupportsSignatures(); err != nil {
			return fmt.Errorf("Can not copy signatures: %v", err)
			return errors.Wrap(err, "Can not copy signatures")
		}
	}
@@ -147,116 +191,120 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe
		return err
	}

	if err := copyLayers(&manifestUpdates, dest, src, rawSource, canModifyManifest, reportWriter); err != nil {
	// If src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates) will be true, it needs to be true by the time we get here.
	ic := imageCopier{
		copiedBlobs:       make(map[digest.Digest]digest.Digest),
		cachedDiffIDs:     make(map[digest.Digest]digest.Digest),
		manifestUpdates:   &manifestUpdates,
		dest:              dest,
		src:               src,
		rawSource:         rawSource,
		diffIDsAreNeeded:  src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates),
		canModifyManifest: canModifyManifest,
		reportWriter:      reportWriter,
		progressInterval:  options.ProgressInterval,
		progress:          options.Progress,
	}

	if err := ic.copyLayers(); err != nil {
		return err
	}

	pendingImage := src
	if !reflect.DeepEqual(manifestUpdates, types.ManifestUpdateOptions{InformationOnly: manifestUpdates.InformationOnly}) {
		if !canModifyManifest {
			return fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden")
			return errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden")
		}
		manifestUpdates.InformationOnly.Destination = dest
		pendingImage, err = src.UpdatedImage(manifestUpdates)
		if err != nil {
			return fmt.Errorf("Error creating an updated image manifest: %v", err)
			return errors.Wrap(err, "Error creating an updated image manifest")
		}
	}
	manifest, _, err := pendingImage.Manifest()
	if err != nil {
		return fmt.Errorf("Error reading manifest: %v", err)
		return errors.Wrap(err, "Error reading manifest")
	}

	if err := copyConfig(dest, pendingImage, reportWriter); err != nil {
	if err := ic.copyConfig(pendingImage); err != nil {
		return err
	}

	if options != nil && options.SignBy != "" {
	if options.SignBy != "" {
		mech, err := signature.NewGPGSigningMechanism()
		if err != nil {
			return fmt.Errorf("Error initializing GPG: %v", err)
			return errors.Wrap(err, "Error initializing GPG")
		}
		defer mech.Close()
		if err := mech.SupportsSigning(); err != nil {
			return errors.Wrap(err, "Signing not supported")
		}

		dockerReference := dest.Reference().DockerReference()
		if dockerReference == nil {
			return fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference()))
			return errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference()))
		}

		writeReport("Signing manifest\n")
		newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, options.SignBy)
		if err != nil {
			return fmt.Errorf("Error creating signature: %v", err)
			return errors.Wrap(err, "Error creating signature")
		}
		sigs = append(sigs, newSig)
	}

	writeReport("Writing manifest to image destination\n")
	if err := dest.PutManifest(manifest); err != nil {
		return fmt.Errorf("Error writing manifest: %v", err)
		return errors.Wrap(err, "Error writing manifest")
	}

	writeReport("Storing signatures\n")
	if err := dest.PutSignatures(sigs); err != nil {
		return fmt.Errorf("Error writing signatures: %v", err)
		return errors.Wrap(err, "Error writing signatures")
	}

	if err := dest.Commit(); err != nil {
		return fmt.Errorf("Error committing the finished image: %v", err)
		return errors.Wrap(err, "Error committing the finished image")
	}

	return nil
}

// copyLayers copies layers from src/rawSource to dest, using and updating manifestUpdates if necessary and canModifyManifest.
// If src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates) will be true, it needs to be true by the time this function is called.
func copyLayers(manifestUpdates *types.ManifestUpdateOptions, dest types.ImageDestination, src types.Image, rawSource types.ImageSource,
	canModifyManifest bool, reportWriter io.Writer) error {
	type copiedLayer struct {
		blobInfo types.BlobInfo
		diffID   digest.Digest
	}

	diffIDsAreNeeded := src.UpdatedImageNeedsLayerDiffIDs(*manifestUpdates)

	srcInfos := src.LayerInfos()
// copyLayers copies layers from src/rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest.
func (ic *imageCopier) copyLayers() error {
	srcInfos := ic.src.LayerInfos()
	destInfos := []types.BlobInfo{}
	diffIDs := []digest.Digest{}
	copiedLayers := map[digest.Digest]copiedLayer{}
	for _, srcLayer := range srcInfos {
		cl, ok := copiedLayers[srcLayer.Digest]
		if !ok {
			var (
				destInfo types.BlobInfo
				diffID   digest.Digest
				err      error
			)
			if dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
				// DiffIDs are, currently, needed only when converting from schema1.
				// In which case src.LayerInfos will not have URLs because schema1
				// does not support them.
				if diffIDsAreNeeded {
					return errors.New("getting DiffID for foreign layers is unimplemented")
				}
				destInfo = srcLayer
				fmt.Fprintf(reportWriter, "Skipping foreign layer %q copy to %s\n", destInfo.Digest, dest.Reference().Transport().Name())
			} else {
				fmt.Fprintf(reportWriter, "Copying blob %s\n", srcLayer.Digest)
				destInfo, diffID, err = copyLayer(dest, rawSource, srcLayer, diffIDsAreNeeded, canModifyManifest, reportWriter)
				if err != nil {
					return err
				}
		var (
			destInfo types.BlobInfo
			diffID   digest.Digest
			err      error
		)
		if ic.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
			// DiffIDs are, currently, needed only when converting from schema1.
			// In which case src.LayerInfos will not have URLs because schema1
			// does not support them.
			if ic.diffIDsAreNeeded {
				return errors.New("getting DiffID for foreign layers is unimplemented")
			}
			destInfo = srcLayer
			fmt.Fprintf(ic.reportWriter, "Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.dest.Reference().Transport().Name())
		} else {
			destInfo, diffID, err = ic.copyLayer(srcLayer)
			if err != nil {
				return err
			}
			cl = copiedLayer{blobInfo: destInfo, diffID: diffID}
			copiedLayers[srcLayer.Digest] = cl
		}
		destInfos = append(destInfos, cl.blobInfo)
		diffIDs = append(diffIDs, cl.diffID)
		destInfos = append(destInfos, destInfo)
		diffIDs = append(diffIDs, diffID)
	}
	manifestUpdates.InformationOnly.LayerInfos = destInfos
	if diffIDsAreNeeded {
		manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
	ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
	if ic.diffIDsAreNeeded {
		ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
	}
	if layerDigestsDiffer(srcInfos, destInfos) {
		manifestUpdates.LayerInfos = destInfos
		ic.manifestUpdates.LayerInfos = destInfos
	}
	return nil
}
@@ -275,20 +323,20 @@ func layerDigestsDiffer(a, b []types.BlobInfo) bool {
}

// copyConfig copies config.json, if any, from src to dest.
func copyConfig(dest types.ImageDestination, src types.Image, reportWriter io.Writer) error {
func (ic *imageCopier) copyConfig(src types.Image) error {
	srcInfo := src.ConfigInfo()
	if srcInfo.Digest != "" {
		fmt.Fprintf(reportWriter, "Copying config %s\n", srcInfo.Digest)
		fmt.Fprintf(ic.reportWriter, "Copying config %s\n", srcInfo.Digest)
		configBlob, err := src.ConfigBlob()
		if err != nil {
			return fmt.Errorf("Error reading config blob %s: %v", srcInfo.Digest, err)
			return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
		}
		destInfo, err := copyBlobFromStream(dest, bytes.NewReader(configBlob), srcInfo, nil, false, reportWriter)
		destInfo, err := ic.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false)
		if err != nil {
			return err
		}
		if destInfo.Digest != srcInfo.Digest {
			return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
			return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
		}
	}
	return nil
@@ -303,16 +351,40 @@ type diffIDResult struct {

// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
func copyLayer(dest types.ImageDestination, src types.ImageSource, srcInfo types.BlobInfo,
	diffIDIsNeeded bool, canCompress bool, reportWriter io.Writer) (types.BlobInfo, digest.Digest, error) {
	srcStream, srcBlobSize, err := src.GetBlob(srcInfo)
func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) {
	// Check if we already have a blob with this digest
	haveBlob, extantBlobSize, err := ic.dest.HasBlob(srcInfo)
	if err != nil {
		return types.BlobInfo{}, "", fmt.Errorf("Error reading blob %s: %v", srcInfo.Digest, err)
		return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest)
	}
	// If we already have a cached diffID for this blob, we don't need to compute it
	diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.cachedDiffIDs[srcInfo.Digest] == "")
	// If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again
	if haveBlob && !diffIDIsNeeded {
		// Check the blob sizes match, if we were given a size this time
		if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize {
			return types.BlobInfo{}, "", errors.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size)
		}
		srcInfo.Size = extantBlobSize
		// Tell the image destination that this blob's delta is being applied again. For some image destinations, this can be faster than using GetBlob/PutBlob
		blobinfo, err := ic.dest.ReapplyBlob(srcInfo)
		if err != nil {
			return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest)
		}
		fmt.Fprintf(ic.reportWriter, "Skipping fetch of repeat blob %s\n", srcInfo.Digest)
		return blobinfo, ic.cachedDiffIDs[srcInfo.Digest], err
	}

	// Fallback: copy the layer, computing the diffID if we need to do so
	fmt.Fprintf(ic.reportWriter, "Copying blob %s\n", srcInfo.Digest)
	srcStream, srcBlobSize, err := ic.rawSource.GetBlob(srcInfo)
	if err != nil {
		return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
	}
	defer srcStream.Close()

	blobInfo, diffIDChan, err := copyLayerFromStream(dest, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize},
		diffIDIsNeeded, canCompress, reportWriter)
	blobInfo, diffIDChan, err := ic.copyLayerFromStream(srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize},
		diffIDIsNeeded)
	if err != nil {
		return types.BlobInfo{}, "", err
	}
@@ -320,9 +392,10 @@ func copyLayer(dest types.ImageDestination, src types.ImageSource, srcInfo types
	if diffIDIsNeeded {
		diffIDResult = <-diffIDChan
		if diffIDResult.err != nil {
			return types.BlobInfo{}, "", fmt.Errorf("Error computing layer DiffID: %v", diffIDResult.err)
			return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
		}
		logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
		ic.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
	}
	return blobInfo, diffIDResult.digest, nil
}
@@ -331,9 +404,9 @@ func copyLayer(dest types.ImageDestination, src types.ImageSource, srcInfo types
// it copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest,
// perhaps compressing the stream if canCompress,
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func copyLayerFromStream(dest types.ImageDestination, srcStream io.Reader, srcInfo types.BlobInfo,
	diffIDIsNeeded bool, canCompress bool, reportWriter io.Writer) (types.BlobInfo, <-chan diffIDResult, error) {
	var getDiffIDRecorder func(decompressorFunc) io.Writer // = nil
func (ic *imageCopier) copyLayerFromStream(srcStream io.Reader, srcInfo types.BlobInfo,
	diffIDIsNeeded bool) (types.BlobInfo, <-chan diffIDResult, error) {
	var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
	var diffIDChan chan diffIDResult

	err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below
@@ -344,7 +417,7 @@ func copyLayerFromStream(dest types.ImageDestination, srcStream io.Reader, srcIn
			pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
		}()

		getDiffIDRecorder = func(decompressor decompressorFunc) io.Writer {
		getDiffIDRecorder = func(decompressor compression.DecompressorFunc) io.Writer {
			// If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
			// reading from the pipe has failed, we don’t really care.
			// We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
@@ -356,14 +429,13 @@ func copyLayerFromStream(dest types.ImageDestination, srcStream io.Reader, srcIn
			return pipeWriter
		}
	}
	blobInfo, err := copyBlobFromStream(dest, srcStream, srcInfo,
		getDiffIDRecorder, canCompress, reportWriter) // Sets err to nil on success
	blobInfo, err := ic.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest) // Sets err to nil on success
	return blobInfo, diffIDChan, err
	// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
}

// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest.
func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor decompressorFunc) {
func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compression.DecompressorFunc) {
	result := diffIDResult{
		digest: "",
		err:    errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
@@ -375,7 +447,7 @@ func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadClo
}

// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
func computeDiffID(stream io.Reader, decompressor decompressorFunc) (digest.Digest, error) {
func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) (digest.Digest, error) {
	if decompressor != nil {
		s, err := decompressor(stream)
		if err != nil {
@@ -391,9 +463,9 @@ func computeDiffID(stream io.Reader, decompressor decompressorFunc) (digest.Dige
// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
// perhaps compressing it if canCompress,
// and returns a complete blobInfo of the copied blob.
func copyBlobFromStream(dest types.ImageDestination, srcStream io.Reader, srcInfo types.BlobInfo,
	getOriginalLayerCopyWriter func(decompressor decompressorFunc) io.Writer, canCompress bool,
	reportWriter io.Writer) (types.BlobInfo, error) {
func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo,
	getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
	canCompress bool) (types.BlobInfo, error) {
	// The copying happens through a pipeline of connected io.Readers.
	// === Input: srcStream

@@ -405,27 +477,27 @@ func copyBlobFromStream(dest types.ImageDestination, srcStream io.Reader, srcInf
	// read stream to the end, and validation does not happen.
	digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
	if err != nil {
		return types.BlobInfo{}, fmt.Errorf("Error preparing to verify blob %s: %v", srcInfo.Digest, err)
		return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest)
	}
	var destStream io.Reader = digestingReader

	// === Detect compression of the input stream.
	// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by detectCompression.
	decompressor, destStream, err := detectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform
	// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
	decompressor, destStream, err := compression.DetectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform
	if err != nil {
		return types.BlobInfo{}, fmt.Errorf("Error reading blob %s: %v", srcInfo.Digest, err)
		return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
	}
	isCompressed := decompressor != nil

	// === Report progress using a pb.Reader.
	bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
	bar.Output = reportWriter
	bar.Output = ic.reportWriter
	bar.SetMaxWidth(80)
	bar.ShowTimeLeft = false
	bar.ShowPercent = false
	bar.Start()
	destStream = bar.NewProxyReader(destStream)
	defer fmt.Fprint(reportWriter, "\n")
	defer fmt.Fprint(ic.reportWriter, "\n")

	// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
	var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
@@ -436,7 +508,7 @@ func copyBlobFromStream(dest types.ImageDestination, srcStream io.Reader, srcInf

	// === Compress the layer if it is uncompressed and compression is desired
	var inputInfo types.BlobInfo
	if !canCompress || isCompressed || !dest.ShouldCompressLayers() {
	if !canCompress || isCompressed || !ic.dest.ShouldCompressLayers() {
		logrus.Debugf("Using original blob without modification")
		inputInfo = srcInfo
	} else {
@@ -453,10 +525,21 @@ func copyBlobFromStream(dest types.ImageDestination, srcStream io.Reader, srcInf
		inputInfo.Size = -1
	}

	// === Report progress using the ic.progress channel, if required.
	if ic.progress != nil && ic.progressInterval > 0 {
		destStream = &progressReader{
			source:   destStream,
			channel:  ic.progress,
			interval: ic.progressInterval,
			artifact: srcInfo,
			lastTime: time.Now(),
		}
	}

	// === Finally, send the layer stream to dest.
	uploadedInfo, err := dest.PutBlob(destStream, inputInfo)
	uploadedInfo, err := ic.dest.PutBlob(destStream, inputInfo)
	if err != nil {
		return types.BlobInfo{}, fmt.Errorf("Error writing blob: %v", err)
		return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
	}

	// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
@@ -467,15 +550,15 @@ func copyBlobFromStream(dest types.ImageDestination, srcStream io.Reader, srcInf
		logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
		_, err := io.Copy(ioutil.Discard, originalLayerReader)
		if err != nil {
			return types.BlobInfo{}, fmt.Errorf("Error reading input blob %s: %v", srcInfo.Digest, err)
			return types.BlobInfo{}, errors.Wrapf(err, "Error reading input blob %s", srcInfo.Digest)
		}
	}

	if digestingReader.validationFailed { // Coverage: This should never happen.
		return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
	}
	if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
		return types.BlobInfo{}, fmt.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
	}
	return uploadedInfo, nil
}
@@ -506,7 +589,7 @@ func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, s

	_, srcType, err := src.Manifest()
	if err != nil { // This should have been cached?!
		return fmt.Errorf("Error reading manifest: %v", err)
		return errors.Wrap(err, "Error reading manifest")
	}
	if _, ok := supportedByDest[srcType]; ok {
		logrus.Debugf("Manifest MIME type %s is declared supported by the destination", srcType)
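The named-return retErr plus deferred-Close pattern that Image() adopts above deserves a note: it lets cleanup failures annotate an error that is already on its way out. A standalone sketch of the same idiom (illustrative only, using github.com/pkg/errors as the vendored code does):

```go
package main

import (
	"fmt"
	"os"

	"github.com/pkg/errors"
)

// processFile mirrors the idiom from copy.Image: the deferred func wraps the
// error already being returned, so a failed Close() adds context rather than
// being silently dropped. Note that errors.Wrapf(nil, ...) returns nil, so a
// Close() failure on an otherwise successful call is still lost -- the same
// behavior as the vendored code.
func processFile(path string) (retErr error) {
	f, err := os.Open(path)
	if err != nil {
		return errors.Wrapf(err, "opening %s", path)
	}
	defer func() {
		if err := f.Close(); err != nil {
			retErr = errors.Wrapf(retErr, " (close: %v)", err)
		}
	}()
	// ... work with f ...
	return nil
}

func main() {
	fmt.Println(processFile("/no/such/file"))
}
```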
vendor/github.com/containers/image/copy/progress_reader.go | 28 (generated, vendored, new file)
@@ -0,0 +1,28 @@
package copy

import (
	"io"
	"time"

	"github.com/containers/image/types"
)

// progressReader is a reader that reports its progress on an interval.
type progressReader struct {
	source   io.Reader
	channel  chan types.ProgressProperties
	interval time.Duration
	artifact types.BlobInfo
	lastTime time.Time
	offset   uint64
}

func (r *progressReader) Read(p []byte) (int, error) {
	n, err := r.source.Read(p)
	r.offset += uint64(n)
	if time.Since(r.lastTime) > r.interval {
		r.channel <- types.ProgressProperties{Artifact: r.artifact, Offset: r.offset}
		r.lastTime = time.Now()
	}
	return n, err
}
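On the consumer side: the Progress and ProgressInterval fields added to Options in copy.go above are what feed this reader, and a caller might drain the channel roughly like this (a sketch; the surrounding policyContext/destRef/srcRef setup is assumed, as for any copy.Image call):

```go
// Illustrative consumer of the copy progress channel.
progress := make(chan types.ProgressProperties)
go func() {
	for p := range progress {
		fmt.Printf("blob %s: %d bytes copied so far\n", p.Artifact.Digest, p.Offset)
	}
}()
err := copy.Image(policyContext, destRef, srcRef, &copy.Options{
	ProgressInterval: time.Second,
	Progress:         progress,
})
// copy.Image has returned, so progressReader can no longer send.
close(progress)
if err != nil {
	// handle the copy error
}
```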
vendor/github.com/containers/image/directory/directory_dest.go | 34 (generated, vendored)
@@ -1,13 +1,13 @@
package directory

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/containers/image/types"
	"github.com/docker/distribution/digest"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

type dirImageDestination struct {
@@ -26,7 +26,8 @@ func (d *dirImageDestination) Reference() types.ImageReference {
}

// Close removes resources associated with an initialized ImageDestination, if any.
func (d *dirImageDestination) Close() {
func (d *dirImageDestination) Close() error {
	return nil
}

func (d *dirImageDestination) SupportedManifestMIMETypes() []string {
@@ -69,7 +70,7 @@ func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
	}()

	digester := digest.Canonical.New()
	digester := digest.Canonical.Digester()
	tee := io.TeeReader(stream, digester.Hash())

	size, err := io.Copy(blobFile, tee)
@@ -78,7 +79,7 @@ func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
	}
	computedDigest := digester.Digest()
	if inputInfo.Size != -1 && size != inputInfo.Size {
		return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
		return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
	}
	if err := blobFile.Sync(); err != nil {
		return types.BlobInfo{}, err
@@ -94,6 +95,29 @@ func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
	return types.BlobInfo{Digest: computedDigest, Size: size}, nil
}

// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *dirImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
	if info.Digest == "" {
		return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
	}
	blobPath := d.ref.layerPath(info.Digest)
	finfo, err := os.Stat(blobPath)
	if err != nil && os.IsNotExist(err) {
		return false, -1, nil
	}
	if err != nil {
		return false, -1, err
	}
	return true, finfo.Size(), nil
}

func (d *dirImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
	return info, nil
}

func (d *dirImageDestination) PutManifest(manifest []byte) error {
	return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644)
}
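The HasBlob/ReapplyBlob pair added here is exactly what the reworked copyLayer in copy.go above consults before streaming a blob. Restating the caller-side contract from the doc comment as a sketch (no new API, just the flow):

```go
// Caller-side flow, as exercised by imageCopier.copyLayer above.
haveBlob, size, err := dest.HasBlob(info)
if err != nil {
	return err // unexpected failure only; mere absence is (false, -1, nil)
}
if haveBlob {
	info.Size = size
	// For some destinations this is cheaper than a full GetBlob/PutBlob round trip.
	if _, err := dest.ReapplyBlob(info); err != nil {
		return err
	}
} else {
	// Fall back to streaming the blob through PutBlob.
}
```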
vendor/github.com/containers/image/directory/directory_src.go | 9 (generated, vendored)
@@ -1,14 +1,14 @@
package directory

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/containers/image/manifest"
	"github.com/containers/image/types"
	"github.com/docker/distribution/digest"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)

type dirImageSource struct {
@@ -28,7 +28,8 @@ func (s *dirImageSource) Reference() types.ImageReference {
}

// Close removes resources associated with an initialized ImageSource, if any.
func (s *dirImageSource) Close() {
func (s *dirImageSource) Close() error {
	return nil
}

// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
@@ -42,7 +43,7 @@ func (s *dirImageSource) GetManifest() ([]byte, string, error) {
}

func (s *dirImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
	return nil, "", fmt.Errorf(`Getting target manifest not supported by "dir:"`)
	return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`)
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
vendor/github.com/containers/image/directory/directory_transport.go | 16 (generated, vendored)
@@ -1,18 +1,24 @@
package directory

import (
	"errors"
	"fmt"
	"path/filepath"
	"strings"

	"github.com/pkg/errors"

	"github.com/containers/image/directory/explicitfilepath"
	"github.com/containers/image/docker/reference"
	"github.com/containers/image/image"
	"github.com/containers/image/transports"
	"github.com/containers/image/types"
	"github.com/docker/distribution/digest"
	"github.com/opencontainers/go-digest"
)

func init() {
	transports.Register(Transport)
}

// Transport is an ImageTransport for directory paths.
var Transport = dirTransport{}

@@ -33,7 +39,7 @@ func (t dirTransport) ParseReference(reference string) (types.ImageReference, er
// scope passed to this function will not be "", that value is always allowed.
func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
	if !strings.HasPrefix(scope, "/") {
		return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope)
		return errors.Errorf("Invalid scope %s: Must be an absolute path", scope)
	}
	// Refuse also "/", otherwise "/" and "" would have the same semantics,
	// and "" could be unexpectedly shadowed by the "/" entry.
@@ -42,7 +48,7 @@ func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
	}
	cleaned := filepath.Clean(scope)
	if cleaned != scope {
		return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
		return errors.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
	}
	return nil
}
@@ -153,7 +159,7 @@ func (ref dirReference) NewImageDestination(ctx *types.SystemContext) (types.Ima

// DeleteImage deletes the named image from the registry, if supported.
func (ref dirReference) DeleteImage(ctx *types.SystemContext) error {
	return fmt.Errorf("Deleting images not implemented for dir: images")
	return errors.Errorf("Deleting images not implemented for dir: images")
}

// manifestPath returns a path for the manifest within a directory using our conventions.
vendor/github.com/containers/image/directory/explicitfilepath/path.go | 7 (generated, vendored)
@@ -1,9 +1,10 @@
package explicitfilepath

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/pkg/errors"
)

// ResolvePathToFullyExplicit returns the input path converted to an absolute, no-symlinks, cleaned up path.
@@ -25,14 +26,14 @@ func ResolvePathToFullyExplicit(path string) (string, error) {
			// This can still happen if there is a filesystem race condition, causing the Lstat() above to fail but the later resolution to succeed.
			// We do not care to promise anything if such filesystem race conditions can happen, but we definitely don't want to return "."/".." components
			// in the resulting path, and especially not at the end.
			return "", fmt.Errorf("Unexpectedly missing special filename component in %s", path)
			return "", errors.Errorf("Unexpectedly missing special filename component in %s", path)
		}
		resolvedPath := filepath.Join(resolvedParent, file)
		// As a sanity check, ensure that there are no "." or ".." components.
		cleanedResolvedPath := filepath.Clean(resolvedPath)
		if cleanedResolvedPath != resolvedPath {
			// Coverage: This should never happen.
			return "", fmt.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath)
			return "", errors.Errorf("Internal inconsistency: Path %s resolved to %s still cleaned up to %s", path, resolvedPath, cleanedResolvedPath)
		}
		return resolvedPath, nil
	default: // err != nil, unrecognized
vendor/github.com/containers/image/docker/archive/dest.go | 57 (generated, vendored, new file)
@@ -0,0 +1,57 @@
package archive

import (
	"io"
	"os"

	"github.com/containers/image/docker/tarfile"
	"github.com/containers/image/types"
	"github.com/pkg/errors"
)

type archiveImageDestination struct {
	*tarfile.Destination // Implements most of types.ImageDestination
	ref    archiveReference
	writer io.Closer
}

func newImageDestination(ctx *types.SystemContext, ref archiveReference) (types.ImageDestination, error) {
	if ref.destinationRef == nil {
		return nil, errors.Errorf("docker-archive: destination reference not supplied (must be of form <path>:<reference:tag>)")
	}
	fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_EXCL|os.O_CREATE, 0644)
	if err != nil {
		// FIXME: It should be possible to modify archives, but the only really
		// sane way of doing it is to create a copy of the image, modify
		// it and then do a rename(2).
		if os.IsExist(err) {
			err = errors.New("docker-archive doesn't support modifying existing images")
		}
		return nil, err
	}

	return &archiveImageDestination{
		Destination: tarfile.NewDestination(fh, ref.destinationRef),
		ref:         ref,
		writer:      fh,
	}, nil
}

// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
func (d *archiveImageDestination) Reference() types.ImageReference {
	return d.ref
}

// Close removes resources associated with an initialized ImageDestination, if any.
func (d *archiveImageDestination) Close() error {
	return d.writer.Close()
}

// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *archiveImageDestination) Commit() error {
	return d.Destination.Commit()
}
vendor/github.com/containers/image/docker/archive/src.go | 36 (generated, vendored, new file)
@@ -0,0 +1,36 @@
package archive

import (
	"github.com/Sirupsen/logrus"
	"github.com/containers/image/docker/tarfile"
	"github.com/containers/image/types"
)

type archiveImageSource struct {
	*tarfile.Source // Implements most of types.ImageSource
	ref archiveReference
}

// newImageSource returns a types.ImageSource for the specified image reference.
// The caller must call .Close() on the returned ImageSource.
func newImageSource(ctx *types.SystemContext, ref archiveReference) types.ImageSource {
	if ref.destinationRef != nil {
		logrus.Warnf("docker-archive: references are not supported for sources (ignoring)")
	}
	src := tarfile.NewSource(ref.path)
	return &archiveImageSource{
		Source: src,
		ref:    ref,
	}
}

// Reference returns the reference used to set up this source, _as specified by the user_
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
func (s *archiveImageSource) Reference() types.ImageReference {
	return s.ref
}

// Close removes resources associated with an initialized ImageSource, if any.
func (s *archiveImageSource) Close() error {
	return nil
}
155  vendor/github.com/containers/image/docker/archive/transport.go  (generated, vendored, Normal file)
@@ -0,0 +1,155 @@
package archive

import (
    "fmt"
    "strings"

    "github.com/containers/image/docker/reference"
    ctrImage "github.com/containers/image/image"
    "github.com/containers/image/transports"
    "github.com/containers/image/types"
    "github.com/pkg/errors"
)

func init() {
    transports.Register(Transport)
}

// Transport is an ImageTransport for local Docker archives.
var Transport = archiveTransport{}

type archiveTransport struct{}

func (t archiveTransport) Name() string {
    return "docker-archive"
}

// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
func (t archiveTransport) ParseReference(reference string) (types.ImageReference, error) {
    return ParseReference(reference)
}

// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
// scope passed to this function will not be "", that value is always allowed.
func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
    // See the explanation in archiveReference.PolicyConfigurationIdentity.
    return errors.New(`docker-archive: does not support any scopes except the default "" one`)
}

// archiveReference is an ImageReference for Docker images.
type archiveReference struct {
    destinationRef reference.NamedTagged // only used for destinations
    path           string
}

// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
func ParseReference(refString string) (types.ImageReference, error) {
    if refString == "" {
        return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
    }

    parts := strings.SplitN(refString, ":", 2)
    path := parts[0]
    var destinationRef reference.NamedTagged

    // A :tag was specified, which is only necessary for destinations.
    if len(parts) == 2 {
        ref, err := reference.ParseNormalizedNamed(parts[1])
        if err != nil {
            return nil, errors.Wrapf(err, "docker-archive parsing reference")
        }
        ref = reference.TagNameOnly(ref)

        if _, isDigest := ref.(reference.Canonical); isDigest {
            return nil, errors.Errorf("docker-archive doesn't support digest references: %s", refString)
        }

        refTagged, isTagged := ref.(reference.NamedTagged)
        if !isTagged {
            // Really shouldn't be hit...
            return nil, errors.Errorf("internal error: reference is not tagged even after reference.TagNameOnly: %s", refString)
        }
        destinationRef = refTagged
    }

    return archiveReference{
        destinationRef: destinationRef,
        path:           path,
    }, nil
}

func (ref archiveReference) Transport() types.ImageTransport {
    return Transport
}

// StringWithinTransport returns a string representation of the reference, which MUST be such that
// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref archiveReference) StringWithinTransport() string {
    if ref.destinationRef == nil {
        return ref.path
    }
    return fmt.Sprintf("%s:%s", ref.path, ref.destinationRef.String())
}

// DockerReference returns a Docker reference associated with this reference
// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
func (ref archiveReference) DockerReference() reference.Named {
    return ref.destinationRef
}

// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
// (i.e. various references with exactly the same semantics should return the same configuration identity)
// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
// not required/guaranteed that it will be a valid input to Transport().ParseReference().
// Returns "" if configuration identities for these references are not supported.
func (ref archiveReference) PolicyConfigurationIdentity() string {
    // Punt, the justification is similar to dockerReference.PolicyConfigurationIdentity.
    return ""
}

// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
// for if explicit configuration for PolicyConfigurationIdentity() is not set.  The list will be processed
// in order, terminating on first match, and an implicit "" is always checked at the end.
// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
// and each following element to be a prefix of the element preceding it.
func (ref archiveReference) PolicyConfigurationNamespaces() []string {
    // TODO
    return []string{}
}

// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
// The caller must call .Close() on the returned Image.
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
    src := newImageSource(ctx, ref)
    return ctrImage.FromSource(src)
}

// NewImageSource returns a types.ImageSource for this reference,
// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
// The caller must call .Close() on the returned ImageSource.
func (ref archiveReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
    return newImageSource(ctx, ref), nil
}

// NewImageDestination returns a types.ImageDestination for this reference.
// The caller must call .Close() on the returned ImageDestination.
func (ref archiveReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
    return newImageDestination(ctx, ref)
}

// DeleteImage deletes the named image from the registry, if supported.
func (ref archiveReference) DeleteImage(ctx *types.SystemContext) error {
    // Not really supported, for safety reasons.
    return errors.New("Deleting images not implemented for docker-archive: images")
}
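A short usage sketch of the parsing rules above; the import path assumes the vendored package layout shown in this diff, and the printed strings are illustrative:

package main

import (
    "fmt"
    "strings"

    "github.com/containers/image/docker/archive"
)

func main() {
    // Destination form: everything after the first ":" is parsed as a
    // normalized name, and a missing tag is defaulted to ":latest".
    dest, err := archive.ParseReference("/tmp/app.tar:busybox")
    if err != nil {
        panic(err)
    }
    fmt.Println(dest.StringWithinTransport()) // e.g. /tmp/app.tar:docker.io/library/busybox:latest

    // Source form: a bare path with no reference.
    src, err := archive.ParseReference("/tmp/app.tar")
    if err != nil {
        panic(err)
    }
    fmt.Println(src.StringWithinTransport()) // /tmp/app.tar

    // Digest references are refused by this transport.
    _, err = archive.ParseReference("/tmp/app.tar:busybox@sha256:" + strings.Repeat("0", 64))
    fmt.Println("digest reference rejected:", err != nil)
}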
212  vendor/github.com/containers/image/docker/daemon/daemon_dest.go  (generated, vendored)
@@ -1,33 +1,24 @@
 package daemon
 
 import (
-    "archive/tar"
-    "bytes"
-    "encoding/json"
-    "errors"
-    "fmt"
     "io"
-    "io/ioutil"
-    "os"
-    "time"
 
     "github.com/Sirupsen/logrus"
     "github.com/containers/image/docker/reference"
-    "github.com/containers/image/manifest"
+    "github.com/containers/image/docker/tarfile"
     "github.com/containers/image/types"
-    "github.com/docker/distribution/digest"
-    "github.com/docker/engine-api/client"
+    "github.com/docker/docker/client"
+    "github.com/pkg/errors"
     "golang.org/x/net/context"
 )
 
 type daemonImageDestination struct {
-    ref            daemonReference
-    namedTaggedRef reference.NamedTagged // Strictly speaking redundant with ref above; having the field makes it structurally impossible for later users to fail.
+    ref                  daemonReference
+    *tarfile.Destination // Implements most of types.ImageDestination
     // For talking to imageLoadGoroutine
     goroutineCancel context.CancelFunc
     statusChannel   <-chan error
     writer          *io.PipeWriter
-    tar             *tar.Writer
     // Other state
    committed bool // writer has been closed
 }
@@ -35,16 +26,16 @@ type daemonImageDestination struct {
 // newImageDestination returns a types.ImageDestination for the specified image reference.
 func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) {
     if ref.ref == nil {
-        return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
+        return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
     }
     namedTaggedRef, ok := ref.ref.(reference.NamedTagged)
     if !ok {
-        return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
+        return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
     }
 
     c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host
     if err != nil {
-        return nil, fmt.Errorf("Error initializing docker engine client: %v", err)
+        return nil, errors.Wrap(err, "Error initializing docker engine client")
     }
 
     reader, writer := io.Pipe()
@@ -56,11 +47,10 @@ func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (t
 
     return &daemonImageDestination{
         ref:             ref,
-        namedTaggedRef:  namedTaggedRef,
+        Destination:     tarfile.NewDestination(writer, namedTaggedRef),
         goroutineCancel: goroutineCancel,
         statusChannel:   statusChannel,
         writer:          writer,
-        tar:             tar.NewWriter(writer),
         committed:       false,
     }, nil
 }
@@ -82,206 +72,44 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe
 
     resp, err := c.ImageLoad(ctx, reader, true)
     if err != nil {
-        err = fmt.Errorf("Error saving image to docker engine: %v", err)
+        err = errors.Wrap(err, "Error saving image to docker engine")
         return
     }
     defer resp.Body.Close()
 }
 
 // Close removes resources associated with an initialized ImageDestination, if any.
-func (d *daemonImageDestination) Close() {
+func (d *daemonImageDestination) Close() error {
     if !d.committed {
         logrus.Debugf("docker-daemon: Closing tar stream to abort loading")
         // In principle, goroutineCancel() should abort the HTTP request and stop the process from continuing.
-        // In practice, though, https://github.com/docker/engine-api/blob/master/client/transport/cancellable/cancellable.go
-        // currently just runs the HTTP request to completion in a goroutine, and returns early if the context is canceled
-        // without terminating the HTTP request at all. So we need this CloseWithError to terminate sending the HTTP request Body
+        // In practice, though, various HTTP implementations used by client.Client.ImageLoad() (including
+        // https://github.com/golang/net/blob/master/context/ctxhttp/ctxhttp_pre17.go and the
+        // net/http version with native Context support in Go 1.7) do not always actually immediately cancel
+        // the operation: they may process the HTTP request, or a part of it, to completion in a goroutine, and
+        // return early if the context is canceled without terminating the goroutine at all.
+        // So we need this CloseWithError to terminate sending the HTTP request Body
         // immediately, and hopefully, through terminating the sending which uses "Transfer-Encoding: chunked" without sending
         // the terminating zero-length chunk, prevent the docker daemon from processing the tar stream at all.
         // Whether that works or not, closing the PipeWriter seems desirable in any case.
         d.writer.CloseWithError(errors.New("Aborting upload, daemonImageDestination closed without a previous .Commit()"))
     }
     d.goroutineCancel()
+
+    return nil
 }
 
 // Reference returns the reference used to set up this destination.  Note that this should directly correspond to user's intent,
 // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
 func (d *daemonImageDestination) Reference() types.ImageReference {
     return d.ref
 }
 
-// SupportedManifestMIMETypes tells which manifest mime types the destination supports
-// If an empty slice or nil it's returned, then any mime type can be tried to upload
-func (d *daemonImageDestination) SupportedManifestMIMETypes() []string {
-    return []string{
-        manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
-    }
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *daemonImageDestination) SupportsSignatures() error {
-    return fmt.Errorf("Storing signatures for docker-daemon: destinations is not supported")
-}
-
-// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
-func (d *daemonImageDestination) ShouldCompressLayers() bool {
-    return false
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *daemonImageDestination) AcceptsForeignLayerURLs() bool {
-    return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
-// inputInfo.Size is the expected length of stream, if known.
-// WARNING: The contents of stream are being verified on the fly.  Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *daemonImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
-    if inputInfo.Digest.String() == "" {
-        return types.BlobInfo{}, fmt.Errorf(`"Can not stream a blob with unknown digest to "docker-daemon:"`)
-    }
-
-    if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size.
-        logrus.Debugf("docker-daemon: input with unknown size, streaming to disk first…")
-        streamCopy, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-daemon-blob")
-        if err != nil {
-            return types.BlobInfo{}, err
-        }
-        defer os.Remove(streamCopy.Name())
-        defer streamCopy.Close()
-
-        size, err := io.Copy(streamCopy, stream)
-        if err != nil {
-            return types.BlobInfo{}, err
-        }
-        _, err = streamCopy.Seek(0, os.SEEK_SET)
-        if err != nil {
-            return types.BlobInfo{}, err
-        }
-        inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
-        stream = streamCopy
-        logrus.Debugf("… streaming done")
-    }
-
-    digester := digest.Canonical.New()
-    tee := io.TeeReader(stream, digester.Hash())
-    if err := d.sendFile(inputInfo.Digest.String(), inputInfo.Size, tee); err != nil {
-        return types.BlobInfo{}, err
-    }
-    return types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}, nil
-}
-
-func (d *daemonImageDestination) PutManifest(m []byte) error {
-    var man schema2Manifest
-    if err := json.Unmarshal(m, &man); err != nil {
-        return fmt.Errorf("Error parsing manifest: %v", err)
-    }
-    if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
-        return fmt.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
-    }
-
-    layerPaths := []string{}
-    for _, l := range man.Layers {
-        layerPaths = append(layerPaths, l.Digest.String())
-    }
-
-    // For github.com/docker/docker consumers, this works just as well as
-    //   refString := d.namedTaggedRef.String()  [i.e. d.ref.ref.String()]
-    // because when reading the RepoTags strings, github.com/docker/docker/reference
-    // normalizes both of them to the same value.
-    //
-    // Doing it this way to include the normalized-out `docker.io[/library]` does make
-    // a difference for github.com/projectatomic/docker consumers, with the
-    // “Add --add-registry and --block-registry options to docker daemon” patch.
-    // These consumers treat reference strings which include a hostname and reference
-    // strings without a hostname differently.
-    //
-    // Using the host name here is more explicit about the intent, and it has the same
-    // effect as (docker pull) in projectatomic/docker, which tags the result using
-    // a hostname-qualified reference.
-    // See https://github.com/containers/image/issues/72 for a more detailed
-    // analysis and explanation.
-    refString := fmt.Sprintf("%s:%s", d.namedTaggedRef.FullName(), d.namedTaggedRef.Tag())
-
-    items := []manifestItem{{
-        Config:       man.Config.Digest.String(),
-        RepoTags:     []string{refString},
-        Layers:       layerPaths,
-        Parent:       "",
-        LayerSources: nil,
-    }}
-    itemsBytes, err := json.Marshal(&items)
-    if err != nil {
-        return err
-    }
-
-    // FIXME? Do we also need to support the legacy format?
-    return d.sendFile(manifestFileName, int64(len(itemsBytes)), bytes.NewReader(itemsBytes))
-}
-
-type tarFI struct {
-    path string
-    size int64
-}
-
-func (t *tarFI) Name() string {
-    return t.path
-}
-func (t *tarFI) Size() int64 {
-    return t.size
-}
-func (t *tarFI) Mode() os.FileMode {
-    return 0444
-}
-func (t *tarFI) ModTime() time.Time {
-    return time.Unix(0, 0)
-}
-func (t *tarFI) IsDir() bool {
-    return false
-}
-func (t *tarFI) Sys() interface{} {
-    return nil
-}
-
-// sendFile sends a file into the tar stream.
-func (d *daemonImageDestination) sendFile(path string, expectedSize int64, stream io.Reader) error {
-    hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
-    if err != nil {
-        return nil
-    }
-    logrus.Debugf("Sending as tar file %s", path)
-    if err := d.tar.WriteHeader(hdr); err != nil {
-        return err
-    }
-    size, err := io.Copy(d.tar, stream)
-    if err != nil {
-        return err
-    }
-    if size != expectedSize {
-        return fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
-    }
-    return nil
-}
-
-func (d *daemonImageDestination) PutSignatures(signatures [][]byte) error {
-    if len(signatures) != 0 {
-        return fmt.Errorf("Storing signatures for docker-daemon: destinations is not supported")
-    }
-    return nil
-}
-
 // Commit marks the process of storing the image as successful and asks for the image to be persisted.
 // WARNING: This does not have any transactional semantics:
 // - Uploaded data MAY be visible to others before Commit() is called
 // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
 func (d *daemonImageDestination) Commit() error {
     logrus.Debugf("docker-daemon: Closing tar stream")
-    if err := d.tar.Close(); err != nil {
+    if err := d.Destination.Commit(); err != nil {
         return err
     }
     if err := d.writer.Close(); err != nil {
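The comment in Close() above explains why cancelling the context alone may not stop an in-flight ImageLoad, and why the pipe is instead closed with an error. Here is a self-contained sketch of that io.Pipe/CloseWithError pattern, independent of the Docker client:

package main

import (
    "errors"
    "fmt"
    "io"
    "io/ioutil"
)

func main() {
    reader, writer := io.Pipe()

    // Stands in for the goroutine streaming the tar to the daemon.
    done := make(chan error, 1)
    go func() {
        _, err := ioutil.ReadAll(reader)
        done <- err
    }()

    writer.Write([]byte("partial tar data"))
    // Closing with an error aborts the consumer without a clean EOF, the
    // same way daemonImageDestination.Close() aborts an uncommitted load.
    writer.CloseWithError(errors.New("aborting upload, closed without Commit()"))

    fmt.Println("consumer saw:", <-done)
}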
298  vendor/github.com/containers/image/docker/daemon/daemon_src.go  (generated, vendored)
@@ -1,35 +1,23 @@
 package daemon
 
 import (
-    "archive/tar"
-    "bytes"
-    "encoding/json"
-    "fmt"
     "io"
     "io/ioutil"
     "os"
-    "path"
 
-    "github.com/containers/image/manifest"
+    "github.com/containers/image/docker/tarfile"
     "github.com/containers/image/types"
-    "github.com/docker/distribution/digest"
-    "github.com/docker/engine-api/client"
+    "github.com/docker/docker/client"
+    "github.com/pkg/errors"
     "golang.org/x/net/context"
 )
 
 const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
 
 type daemonImageSource struct {
-    ref         daemonReference
-    tarCopyPath string
-    // The following data is only available after ensureCachedDataIsPresent() succeeds
-    tarManifest       *manifestItem // nil if not available yet.
-    configBytes       []byte
-    configDigest      digest.Digest
-    orderedDiffIDList []diffID
-    knownLayers       map[diffID]*layerInfo
-    // Other state
-    generatedManifest []byte // Private cache for GetManifest(), nil if not set yet.
+    ref             daemonReference
+    *tarfile.Source // Implements most of types.ImageSource
+    tarCopyPath     string
 }
 
 type layerInfo struct {
@@ -49,13 +37,13 @@ type layerInfo struct
 func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageSource, error) {
     c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host
     if err != nil {
-        return nil, fmt.Errorf("Error initializing docker engine client: %v", err)
+        return nil, errors.Wrap(err, "Error initializing docker engine client")
     }
     // Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
     // Either way ImageSave should create a tarball with exactly one image.
     inputStream, err := c.ImageSave(context.TODO(), []string{ref.StringWithinTransport()})
     if err != nil {
-        return nil, fmt.Errorf("Error loading image from docker engine: %v", err)
+        return nil, errors.Wrap(err, "Error loading image from docker engine")
     }
     defer inputStream.Close()
 
@@ -80,6 +68,7 @@ func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageS
     succeeded = true
     return &daemonImageSource{
         ref:         ref,
+        Source:      tarfile.NewSource(tarCopyFile.Name()),
         tarCopyPath: tarCopyFile.Name(),
     }, nil
 }
@@ -91,271 +80,6 @@ func (s *daemonImageSource) Reference() types.ImageReference {
 }
 
 // Close removes resources associated with an initialized ImageSource, if any.
-func (s *daemonImageSource) Close() {
-    _ = os.Remove(s.tarCopyPath)
-}
-
-// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
-type tarReadCloser struct {
-    *tar.Reader
-    backingFile *os.File
-}
-
-func (t *tarReadCloser) Close() error {
-    return t.backingFile.Close()
-}
-
-// openTarComponent returns a ReadCloser for the specific file within the archive.
-// This is linear scan; we assume that the tar file will have a fairly small amount of files (~layers),
-// and that filesystem caching will make the repeated seeking over the (uncompressed) tarCopyPath cheap enough.
-// The caller should call .Close() on the returned stream.
-func (s *daemonImageSource) openTarComponent(componentPath string) (io.ReadCloser, error) {
-    f, err := os.Open(s.tarCopyPath)
-    if err != nil {
-        return nil, err
-    }
-    succeeded := false
-    defer func() {
-        if !succeeded {
-            f.Close()
-        }
-    }()
-
-    tarReader, header, err := findTarComponent(f, componentPath)
-    if err != nil {
-        return nil, err
-    }
-    if header == nil {
-        return nil, os.ErrNotExist
-    }
-    if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
-        // We follow only one symlink; so no loops are possible.
-        if _, err := f.Seek(0, os.SEEK_SET); err != nil {
-            return nil, err
-        }
-        // The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
-        // so we don't care.
-        tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
-        if err != nil {
-            return nil, err
-        }
-        if header == nil {
-            return nil, os.ErrNotExist
-        }
-    }
-
-    if !header.FileInfo().Mode().IsRegular() {
-        return nil, fmt.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
-    }
-    succeeded = true
-    return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
-}
-
-// findTarComponent returns a header and a reader matching path within inputFile,
-// or (nil, nil, nil) if not found.
-func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) {
-    t := tar.NewReader(inputFile)
-    for {
-        h, err := t.Next()
-        if err == io.EOF {
-            break
-        }
-        if err != nil {
-            return nil, nil, err
-        }
-        if h.Name == path {
-            return t, h, nil
-        }
-    }
-    return nil, nil, nil
-}
-
-// readTarComponent returns full contents of componentPath.
-func (s *daemonImageSource) readTarComponent(path string) ([]byte, error) {
-    file, err := s.openTarComponent(path)
-    if err != nil {
-        return nil, fmt.Errorf("Error loading tar component %s: %v", path, err)
-    }
-    defer file.Close()
-    bytes, err := ioutil.ReadAll(file)
-    if err != nil {
-        return nil, err
-    }
-    return bytes, nil
-}
-
-// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
-func (s *daemonImageSource) ensureCachedDataIsPresent() error {
-    if s.tarManifest != nil {
-        return nil
-    }
-
-    // Read and parse manifest.json
-    tarManifest, err := s.loadTarManifest()
-    if err != nil {
-        return err
-    }
-
-    // Read and parse config.
-    configBytes, err := s.readTarComponent(tarManifest.Config)
-    if err != nil {
-        return err
-    }
-    var parsedConfig dockerImage // Most fields ommitted, we only care about layer DiffIDs.
-    if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
-        return fmt.Errorf("Error decoding tar config %s: %v", tarManifest.Config, err)
-    }
-
-    knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
-    if err != nil {
-        return err
-    }
-
-    // Success; commit.
-    s.tarManifest = tarManifest
-    s.configBytes = configBytes
-    s.configDigest = digest.FromBytes(configBytes)
-    s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
-    s.knownLayers = knownLayers
-    return nil
-}
-
-// loadTarManifest loads and decodes the manifest.json.
-func (s *daemonImageSource) loadTarManifest() (*manifestItem, error) {
-    // FIXME? Do we need to deal with the legacy format?
-    bytes, err := s.readTarComponent(manifestFileName)
-    if err != nil {
-        return nil, err
-    }
-    var items []manifestItem
-    if err := json.Unmarshal(bytes, &items); err != nil {
-        return nil, fmt.Errorf("Error decoding tar manifest.json: %v", err)
-    }
-    if len(items) != 1 {
-        return nil, fmt.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(items))
-    }
-    return &items[0], nil
-}
-
-func (s *daemonImageSource) prepareLayerData(tarManifest *manifestItem, parsedConfig *dockerImage) (map[diffID]*layerInfo, error) {
-    // Collect layer data available in manifest and config.
-    if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
-        return nil, fmt.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
-    }
-    knownLayers := map[diffID]*layerInfo{}
-    unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
-    for i, diffID := range parsedConfig.RootFS.DiffIDs {
-        if _, ok := knownLayers[diffID]; ok {
-            // Apparently it really can happen that a single image contains the same layer diff more than once.
-            // In that case, the diffID validation ensures that both layers truly are the same, and it should not matter
-            // which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original.
-            continue
-        }
-        layerPath := tarManifest.Layers[i]
-        if _, ok := unknownLayerSizes[layerPath]; ok {
-            return nil, fmt.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
-        }
-        li := &layerInfo{ // A new element in each iteration
-            path: layerPath,
-            size: -1,
-        }
-        knownLayers[diffID] = li
-        unknownLayerSizes[layerPath] = li
-    }
-
-    // Scan the tar file to collect layer sizes.
-    file, err := os.Open(s.tarCopyPath)
-    if err != nil {
-        return nil, err
-    }
-    defer file.Close()
-    t := tar.NewReader(file)
-    for {
-        h, err := t.Next()
-        if err == io.EOF {
-            break
-        }
-        if err != nil {
-            return nil, err
-        }
-        if li, ok := unknownLayerSizes[h.Name]; ok {
-            li.size = h.Size
-            delete(unknownLayerSizes, h.Name)
-        }
-    }
-    if len(unknownLayerSizes) != 0 {
-        return nil, fmt.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice.
-    }
-
-    return knownLayers, nil
-}
-
-// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
-// It may use a remote (= slow) service.
-func (s *daemonImageSource) GetManifest() ([]byte, string, error) {
-    if s.generatedManifest == nil {
-        if err := s.ensureCachedDataIsPresent(); err != nil {
-            return nil, "", err
-        }
-        m := schema2Manifest{
-            SchemaVersion: 2,
-            MediaType:     manifest.DockerV2Schema2MediaType,
-            Config: distributionDescriptor{
-                MediaType: manifest.DockerV2Schema2ConfigMediaType,
-                Size:      int64(len(s.configBytes)),
-                Digest:    s.configDigest,
-            },
-            Layers: []distributionDescriptor{},
-        }
-        for _, diffID := range s.orderedDiffIDList {
-            li, ok := s.knownLayers[diffID]
-            if !ok {
-                return nil, "", fmt.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
-            }
-            m.Layers = append(m.Layers, distributionDescriptor{
-                Digest:    digest.Digest(diffID), // diffID is a digest of the uncompressed tarball
-                MediaType: manifest.DockerV2Schema2LayerMediaType,
-                Size:      li.size,
-            })
-        }
-        manifestBytes, err := json.Marshal(&m)
-        if err != nil {
-            return nil, "", err
-        }
-        s.generatedManifest = manifestBytes
-    }
-    return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
-}
-
-// GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest
-// out of a manifest list.
-func (s *daemonImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
-    // How did we even get here? GetManifest() above has returned a manifest.DockerV2Schema2MediaType.
-    return nil, "", fmt.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
-}
-
-// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *daemonImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
-    if err := s.ensureCachedDataIsPresent(); err != nil {
-        return nil, 0, err
-    }
-
-    if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
-        return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
-    }
-
-    if li, ok := s.knownLayers[diffID(info.Digest)]; ok { // diffID is a digest of the uncompressed tarball,
-        stream, err := s.openTarComponent(li.path)
-        if err != nil {
-            return nil, 0, err
-        }
-        return stream, li.size, nil
-    }
-
-    return nil, 0, fmt.Errorf("Unknown blob %s", info.Digest)
-}
-
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-func (s *daemonImageSource) GetSignatures() ([][]byte, error) {
-    return [][]byte{}, nil
+func (s *daemonImageSource) Close() error {
+    return os.Remove(s.tarCopyPath)
 }
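prepareLayerData (removed here, now provided by the tarfile package) fills in unknown layer sizes with a single scan over the tarball. A minimal sketch of that scan using only the standard library; the file path is hypothetical:

package main

import (
    "archive/tar"
    "fmt"
    "io"
    "os"
)

func main() {
    // sizes maps tar member names (e.g. "<id>/layer.tar") to their sizes,
    // the same single-pass bookkeeping prepareLayerData performs.
    sizes := map[string]int64{}

    f, err := os.Open("/tmp/image.tar") // hypothetical docker-save output
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    defer f.Close()

    t := tar.NewReader(f)
    for {
        h, err := t.Next()
        if err == io.EOF {
            break
        }
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
        sizes[h.Name] = h.Size
    }
    fmt.Println(sizes)
}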
35  vendor/github.com/containers/image/docker/daemon/daemon_transport.go  (generated, vendored)
@@ -1,15 +1,19 @@
 package daemon
 
 import (
-    "errors"
-    "fmt"
+    "github.com/pkg/errors"
 
     "github.com/containers/image/docker/reference"
     "github.com/containers/image/image"
     "github.com/containers/image/transports"
     "github.com/containers/image/types"
-    "github.com/docker/distribution/digest"
+    "github.com/opencontainers/go-digest"
 )
 
 func init() {
     transports.Register(Transport)
 }
 
 // Transport is an ImageTransport for images managed by a local Docker daemon.
 var Transport = daemonTransport{}
 
@@ -47,26 +51,26 @@ type daemonReference struct
 // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
 func ParseReference(refString string) (types.ImageReference, error) {
-    // This is intended to be compatible with reference.ParseIDOrReference, but more strict about refusing some of the ambiguous cases.
+    // This is intended to be compatible with reference.ParseAnyReference, but more strict about refusing some of the ambiguous cases.
     // In particular, this rejects unprefixed digest values (64 hex chars), and sha256 digest prefixes (sha256:fewer-than-64-hex-chars).
 
     // digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag).
-    // reference.ParseIDOrReference interprets such strings as digests.
-    if dgst, err := digest.ParseDigest(refString); err == nil {
+    // reference.ParseAnyReference interprets such strings as digests.
+    if dgst, err := digest.Parse(refString); err == nil {
         // The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name.
         // Other digest references are ambiguous, so refuse them.
         if dgst.Algorithm() != digest.Canonical {
-            return nil, fmt.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical)
+            return nil, errors.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical)
        }
         return NewReference(dgst, nil)
     }
 
-    ref, err := reference.ParseNamed(refString) // This also rejects unprefixed digest values
+    ref, err := reference.ParseNormalizedNamed(refString) // This also rejects unprefixed digest values
     if err != nil {
         return nil, err
     }
-    if ref.Name() == digest.Canonical.String() {
-        return nil, fmt.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical)
+    if reference.FamiliarName(ref) == digest.Canonical.String() {
+        return nil, errors.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical)
     }
     return NewReference("", ref)
 }
@@ -78,14 +82,15 @@ func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference,
 	}
 	if ref != nil {
 		if reference.IsNameOnly(ref) {
-            return nil, fmt.Errorf("docker-daemon: reference %s has neither a tag nor a digest", ref.String())
+            return nil, errors.Errorf("docker-daemon: reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
         }
         // A github.com/distribution/reference value can have a tag and a digest at the same time!
-        // docker/reference does not handle that, so fail.
+        // Most versions of docker/reference do not handle that (ignoring the tag), so reject such input.
+        // This MAY be accepted in the future.
         _, isTagged := ref.(reference.NamedTagged)
         _, isDigested := ref.(reference.Canonical)
         if isTagged && isDigested {
-            return nil, fmt.Errorf("docker-daemon: references with both a tag and digest are currently not supported")
+            return nil, errors.Errorf("docker-daemon: references with both a tag and digest are currently not supported")
         }
     }
     return daemonReference{
@@ -109,7 +114,7 @@ func (ref daemonReference) StringWithinTransport() string {
     case ref.id != "":
         return ref.id.String()
     case ref.ref != nil:
-        return ref.ref.String()
+        return reference.FamiliarString(ref.ref)
     default: // Coverage: Should never happen, NewReference above should refuse such values.
         panic("Internal inconsistency: daemonReference has empty id and nil ref")
     }
@@ -175,5 +180,5 @@ func (ref daemonReference) DeleteImage(ctx *types.SystemContext) error {
     // Should this just untag the image? Should this stop running containers?
     // The semantics is not quite as clear as for remote repositories.
     // The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant.
-    return fmt.Errorf("Deleting images not implemented for docker-daemon: images")
+    return errors.Errorf("Deleting images not implemented for docker-daemon: images")
 }
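The parsing rules above resolve an ambiguity: sha256:<64 hex chars> is structurally also a valid repo:tag string. A brief sketch of how the cases diverge, assuming the vendored daemon package import path; the exact error text may differ:

package main

import (
    "fmt"

    "github.com/containers/image/docker/daemon"
)

func main() {
    // A full sha256 image ID parses as a digest reference.
    id := "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749"
    if ref, err := daemon.ParseReference(id); err == nil {
        fmt.Println("image ID:", ref.StringWithinTransport())
    }

    // A name:tag reference goes through reference.ParseNormalizedNamed instead.
    if ref, err := daemon.ParseReference("busybox:latest"); err == nil {
        fmt.Println("named:", ref.StringWithinTransport())
    }

    // "sha256:<short-hex>" parses as repository "sha256" with a tag, and that
    // repository name is reserved, so the reference is refused as ambiguous.
    if _, err := daemon.ParseReference("sha256:0123"); err != nil {
        fmt.Println("rejected:", err)
    }
}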
291  vendor/github.com/containers/image/docker/docker_client.go  (generated, vendored)
@@ -4,7 +4,6 @@ import (
     "crypto/tls"
     "encoding/base64"
     "encoding/json"
-    "errors"
     "fmt"
     "io"
     "io/ioutil"
@@ -16,10 +15,14 @@ import (
     "time"
 
     "github.com/Sirupsen/logrus"
     "github.com/containers/image/docker/reference"
     "github.com/containers/image/types"
-    "github.com/docker/docker/pkg/homedir"
+    "github.com/containers/storage/pkg/homedir"
+    "github.com/docker/distribution/registry/client"
     "github.com/docker/go-connections/sockets"
     "github.com/docker/go-connections/tlsconfig"
+    "github.com/opencontainers/go-digest"
+    "github.com/pkg/errors"
 )
 
 const (
@@ -31,28 +34,67 @@ const (
     dockerCfgFileName = "config.json"
     dockerCfgObsolete = ".dockercfg"
 
-    baseURL       = "%s://%s/v2/"
-    baseURLV1     = "%s://%s/v1/_ping"
-    tagsURL       = "%s/tags/list"
-    manifestURL   = "%s/manifests/%s"
-    blobsURL      = "%s/blobs/%s"
-    blobUploadURL = "%s/blobs/uploads/"
+    resolvedPingV2URL       = "%s://%s/v2/"
+    resolvedPingV1URL       = "%s://%s/v1/_ping"
+    tagsPath                = "/v2/%s/tags/list"
+    manifestPath            = "/v2/%s/manifests/%s"
+    blobsPath               = "/v2/%s/blobs/%s"
+    blobUploadPath          = "/v2/%s/blobs/uploads/"
+    extensionsSignaturePath = "/extensions/v2/%s/signatures/%s"
 
+    minimumTokenLifetimeSeconds = 60
+
+    extensionSignatureSchemaVersion = 2        // extensionSignature.Version
+    extensionSignatureTypeAtomic    = "atomic" // extensionSignature.Type
 )
 
 // ErrV1NotSupported is returned when we're trying to talk to a
 // docker V1 registry.
 var ErrV1NotSupported = errors.New("can't talk to a V1 docker registry")
 
+// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
+// signature represents a Docker image signature.
+type extensionSignature struct {
+    Version int    `json:"schemaVersion"` // Version specifies the schema version
+    Name    string `json:"name"`          // Name must be in "sha256:<digest>@signatureName" format
+    Type    string `json:"type"`          // Type is optional, if not set it will be defaulted to "AtomicImageV1"
+    Content []byte `json:"content"`       // Content contains the signature
+}
+
+// signatureList represents list of Docker image signatures.
+type extensionSignatureList struct {
+    Signatures []extensionSignature `json:"signatures"`
+}
+
+type bearerToken struct {
+    Token     string    `json:"token"`
+    ExpiresIn int       `json:"expires_in"`
+    IssuedAt  time.Time `json:"issued_at"`
+}
 
 // dockerClient is configuration for dealing with a single Docker registry.
 type dockerClient struct {
-    ctx             *types.SystemContext
-    registry        string
-    username        string
-    password        string
-    wwwAuthenticate string // Cache of a value set by ping() if scheme is not empty
-    scheme          string // Cache of a value returned by a successful ping() if not empty
-    client          *http.Client
-    signatureBase   signatureStorageBase
+    // The following members are set by newDockerClient and do not change afterwards.
+    ctx           *types.SystemContext
+    registry      string
+    username      string
+    password      string
+    client        *http.Client
+    signatureBase signatureStorageBase
+    scope         authScope
+    // The following members are detected registry properties:
+    // They are set after a successful detectProperties(), and never change afterwards.
+    scheme             string // Empty value also used to indicate detectProperties() has not yet succeeded.
+    challenges         []challenge
+    supportsSignatures bool
+    // The following members are private state for setupRequestAuth, both are valid if token != nil.
+    token           *bearerToken
+    tokenExpiration time.Time
 }
 
+type authScope struct {
+    remoteName string
+    actions    string
+}
+
 // this is cloned from docker/go-connections because upstream docker has changed
@@ -101,7 +143,7 @@ func setupCertificates(dir string, tlsc *tls.Config) error {
         if strings.HasSuffix(f.Name(), ".crt") {
             systemPool, err := tlsconfig.SystemCertPool()
             if err != nil {
-                return fmt.Errorf("unable to get system cert pool: %v", err)
+                return errors.Wrap(err, "unable to get system cert pool")
             }
             tlsc.RootCAs = systemPool
             logrus.Debugf("crt: %s", fullPath)
@@ -116,7 +158,7 @@ func setupCertificates(dir string, tlsc *tls.Config) error {
             keyName := certName[:len(certName)-5] + ".key"
             logrus.Debugf("cert: %s", fullPath)
             if !hasFile(fs, keyName) {
-                return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
+                return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
             }
             cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
             if err != nil {
@@ -129,7 +171,7 @@ func setupCertificates(dir string, tlsc *tls.Config) error {
             certName := keyName[:len(keyName)-4] + ".cert"
             logrus.Debugf("key: %s", fullPath)
             if !hasFile(fs, certName) {
-                return fmt.Errorf("missing client certificate %s for key %s", certName, keyName)
+                return errors.Errorf("missing client certificate %s for key %s", certName, keyName)
             }
         }
     }
@@ -147,12 +189,12 @@ func hasFile(files []os.FileInfo, name string) bool {
 
 // newDockerClient returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
 // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
-func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool) (*dockerClient, error) {
-    registry := ref.ref.Hostname()
+func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
+    registry := reference.Domain(ref.ref)
     if registry == dockerHostname {
         registry = dockerRegistry
     }
-    username, password, err := getAuth(ctx, ref.ref.Hostname())
+    username, password, err := getAuth(ctx, reference.Domain(ref.ref))
     if err != nil {
         return nil, err
     }
@@ -184,22 +226,21 @@ func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool)
         password:      password,
         client:        client,
         signatureBase: sigBase,
+        scope: authScope{
+            actions:    actions,
+            remoteName: reference.Path(ref.ref),
+        },
     }, nil
 }
 
 // makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
-// url is NOT an absolute URL, but a path relative to the /v2/ top-level API path. The host name and schema is taken from the client or autodetected.
-func (c *dockerClient) makeRequest(method, url string, headers map[string][]string, stream io.Reader) (*http.Response, error) {
-    if c.scheme == "" {
-        pr, err := c.ping()
-        if err != nil {
-            return nil, err
-        }
-        c.wwwAuthenticate = pr.WWWAuthenticate
-        c.scheme = pr.scheme
+// The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
+func (c *dockerClient) makeRequest(method, path string, headers map[string][]string, stream io.Reader) (*http.Response, error) {
+    if err := c.detectProperties(); err != nil {
+        return nil, err
     }
 
-    url = fmt.Sprintf(baseURL, c.scheme, c.registry) + url
+    url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
     return c.makeRequestToResolvedURL(method, url, headers, stream, -1, true)
 }
 
@@ -224,7 +265,7 @@ func (c *dockerClient) makeRequestToResolvedURL(method, url string, headers map[
     if c.ctx != nil && c.ctx.DockerRegistryUserAgent != "" {
         req.Header.Add("User-Agent", c.ctx.DockerRegistryUserAgent)
     }
-    if c.wwwAuthenticate != "" && sendAuth {
+    if sendAuth {
         if err := c.setupRequestAuth(req); err != nil {
             return nil, err
         }
@@ -237,63 +278,48 @@ func (c *dockerClient) makeRequestToResolvedURL(method, url string, headers map[
     return res, nil
 }
 
+// we're using the challenges from the /v2/ ping response and not the one from the destination
+// URL in this request because:
+//
+// 1) docker does that as well
+// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request
+//
+// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up
 func (c *dockerClient) setupRequestAuth(req *http.Request) error {
-    tokens := strings.SplitN(strings.TrimSpace(c.wwwAuthenticate), " ", 2)
-    if len(tokens) != 2 {
-        return fmt.Errorf("expected 2 tokens in WWW-Authenticate: %d, %s", len(tokens), c.wwwAuthenticate)
+    if len(c.challenges) == 0 {
+        return nil
     }
-    switch tokens[0] {
-    case "Basic":
+    // assume just one...
+    challenge := c.challenges[0]
+    switch challenge.Scheme {
+    case "basic":
         req.SetBasicAuth(c.username, c.password)
         return nil
-    case "Bearer":
-        // FIXME? This gets a new token for every API request;
-        // we may be easily able to reuse a previous token, e.g.
-        // for OpenShift the token only identifies the user and does not vary
-        // across operations.  Should we just try the request first, and
-        // only get a new token on failure?
-        // OTOH what to do with the single-use body stream in that case?
-
-        // Try performing the request, expecting it to fail.
-        testReq := *req
-        // Do not use the body stream, or we couldn't reuse it for the "real" call later.
-        testReq.Body = nil
-        testReq.ContentLength = 0
-        res, err := c.client.Do(&testReq)
-        if err != nil {
-            return err
+    case "bearer":
+        if c.token == nil || time.Now().After(c.tokenExpiration) {
+            realm, ok := challenge.Parameters["realm"]
+            if !ok {
+                return errors.Errorf("missing realm in bearer auth challenge")
+            }
+            service, _ := challenge.Parameters["service"] // Will be "" if not present
+            scope := fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
+            token, err := c.getBearerToken(realm, service, scope)
+            if err != nil {
+                return err
+            }
+            c.token = token
+            c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
         }
-        chs := parseAuthHeader(res.Header)
-        if res.StatusCode != http.StatusUnauthorized || chs == nil || len(chs) == 0 {
-            // no need for bearer? wtf?
-            return nil
-        }
-        // Arbitrarily use the first challenge, there is no reason to expect more than one.
-        challenge := chs[0]
-        if challenge.Scheme != "bearer" { // Another artifact of trying to handle WWW-Authenticate before it actually happens.
-            return fmt.Errorf("Unimplemented: WWW-Authenticate Bearer replaced by %#v", challenge.Scheme)
-        }
-        realm, ok := challenge.Parameters["realm"]
-        if !ok {
-            return fmt.Errorf("missing realm in bearer auth challenge")
-        }
-        service, _ := challenge.Parameters["service"] // Will be "" if not present
-        scope, _ := challenge.Parameters["scope"]     // Will be "" if not present
-        token, err := c.getBearerToken(realm, service, scope)
-        if err != nil {
-            return err
-        }
-        req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
+        req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token))
         return nil
     }
-    return fmt.Errorf("no handler for %s authentication", tokens[0])
+    // support docker bearer with authconfig's Auth string? see docker2aci
+    return errors.Errorf("no handler for %s authentication", challenge.Scheme)
 }
 
-func (c *dockerClient) getBearerToken(realm, service, scope string) (string, error) {
+func (c *dockerClient) getBearerToken(realm, service, scope string) (*bearerToken, error) {
     authReq, err := http.NewRequest("GET", realm, nil)
     if err != nil {
-        return "", err
+        return nil, err
     }
     getParams := authReq.URL.Query()
     if service != "" {
@@ -312,35 +338,33 @@ func (c *dockerClient) getBearerToken(realm, service, scope string) (string, err
     client := &http.Client{Transport: tr}
     res, err := client.Do(authReq)
     if err != nil {
-        return "", err
+        return nil, err
     }
     defer res.Body.Close()
     switch res.StatusCode {
     case http.StatusUnauthorized:
-        return "", fmt.Errorf("unable to retrieve auth token: 401 unauthorized")
+        return nil, errors.Errorf("unable to retrieve auth token: 401 unauthorized")
     case http.StatusOK:
         break
     default:
-        return "", fmt.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL)
+        return nil, errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL)
     }
     tokenBlob, err := ioutil.ReadAll(res.Body)
     if err != nil {
-        return "", err
+        return nil, err
    }
-    tokenStruct := struct {
-        Token string `json:"token"`
-    }{}
-    if err := json.Unmarshal(tokenBlob, &tokenStruct); err != nil {
-        return "", err
+    var token bearerToken
+    if err := json.Unmarshal(tokenBlob, &token); err != nil {
+        return nil, err
     }
-    // TODO(runcom): reuse tokens?
-    //hostAuthTokens, ok = rb.hostsV2AuthTokens[req.URL.Host]
-    //if !ok {
-    //hostAuthTokens = make(map[string]string)
-    //rb.hostsV2AuthTokens[req.URL.Host] = hostAuthTokens
-    //}
-    //hostAuthTokens[repo] = tokenStruct.Token
-    return tokenStruct.Token, nil
+    if token.ExpiresIn < minimumTokenLifetimeSeconds {
+        token.ExpiresIn = minimumTokenLifetimeSeconds
+        logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
+    }
+    if token.IssuedAt.IsZero() {
+        token.IssuedAt = time.Now().UTC()
+    }
+    return &token, nil
 }

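With the bearerToken type above, setupRequestAuth refetches a token only once the cached one passes IssuedAt + ExpiresIn, and getBearerToken clamps short lifetimes to minimumTokenLifetimeSeconds. A self-contained sketch of that expiry bookkeeping (types redeclared locally for illustration):

package main

import (
    "fmt"
    "time"
)

const minimumTokenLifetimeSeconds = 60

type bearerToken struct {
    Token     string
    ExpiresIn int
    IssuedAt  time.Time
}

func main() {
    // Registry answered with a 1-second lifetime and no issued_at field.
    token := bearerToken{Token: "opaque", ExpiresIn: 1}

    // Same normalization getBearerToken applies above.
    if token.ExpiresIn < minimumTokenLifetimeSeconds {
        token.ExpiresIn = minimumTokenLifetimeSeconds
    }
    if token.IssuedAt.IsZero() {
        token.IssuedAt = time.Now().UTC()
    }

    // Same check setupRequestAuth performs before reusing the cached token.
    expiration := token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
    fmt.Println("token reusable:", time.Now().Before(expiration))
}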
func getAuth(ctx *types.SystemContext, registry string) (string, string, error) {
|
||||
@@ -365,7 +389,7 @@ func getAuth(ctx *types.SystemContext, registry string) (string, string, error)
|
||||
if os.IsNotExist(err) {
|
||||
return "", "", nil
|
||||
}
|
||||
return "", "", fmt.Errorf("%s - %v", oldDockerCfgPath, err)
|
||||
return "", "", errors.Wrap(err, oldDockerCfgPath)
|
||||
}
|
||||
|
||||
j, err := ioutil.ReadFile(oldDockerCfgPath)
|
||||
@@ -377,7 +401,7 @@ func getAuth(ctx *types.SystemContext, registry string) (string, string, error)
|
||||
}
|
||||
|
||||
} else if err != nil {
|
||||
return "", "", fmt.Errorf("%s - %v", dockerCfgPath, err)
|
||||
return "", "", errors.Wrap(err, dockerCfgPath)
|
||||
}
|
||||
|
||||
// I'm feeling lucky
|
||||
@@ -397,50 +421,49 @@ func getAuth(ctx *types.SystemContext, registry string) (string, string, error)
|
||||
return "", "", nil
|
||||
}
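getAuth reads credentials from ~/.docker/config.json (dockerCfgPath), falling back to the legacy ~/.dockercfg (oldDockerCfgPath); in both formats the stored value is base64("user:password"). A runnable sketch of the decoding step, under the assumption that the vendored helper does essentially this (its exact name and signature are not shown in the hunk):

package main

import (
    "encoding/base64"
    "fmt"
    "strings"
)

// decodeDockerAuth splits a base64-encoded "user:password" value of the kind
// stored under "auths" in ~/.docker/config.json.
func decodeDockerAuth(s string) (string, string, error) {
    decoded, err := base64.StdEncoding.DecodeString(s)
    if err != nil {
        return "", "", err
    }
    parts := strings.SplitN(string(decoded), ":", 2)
    if len(parts) != 2 {
        return "", "", fmt.Errorf("malformed auth entry, expected user:password")
    }
    return parts[0], strings.TrimSpace(parts[1]), nil
}

func main() {
    user, pass, err := decodeDockerAuth(base64.StdEncoding.EncodeToString([]byte("alice:secret")))
    fmt.Println(user, pass, err) // alice secret <nil>
}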

type pingResponse struct {
    WWWAuthenticate string
    APIVersion string
    scheme string
}
// detectProperties detects various properties of the registry.
// See the dockerClient documentation for members which are affected by this.
func (c *dockerClient) detectProperties() error {
    if c.scheme != "" {
        return nil
    }

func (c *dockerClient) ping() (*pingResponse, error) {
    ping := func(scheme string) (*pingResponse, error) {
        url := fmt.Sprintf(baseURL, scheme, c.registry)
    ping := func(scheme string) error {
        url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
        resp, err := c.makeRequestToResolvedURL("GET", url, nil, nil, -1, true)
        logrus.Debugf("Ping %s err %#v", url, err)
        if err != nil {
            return nil, err
            return err
        }
        defer resp.Body.Close()
        logrus.Debugf("Ping %s status %d", scheme+"://"+c.registry+"/v2/", resp.StatusCode)
        logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
        if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
            return nil, fmt.Errorf("error pinging repository, response code %d", resp.StatusCode)
            return errors.Errorf("error pinging repository, response code %d", resp.StatusCode)
        }
        pr := &pingResponse{}
        pr.WWWAuthenticate = resp.Header.Get("WWW-Authenticate")
        pr.APIVersion = resp.Header.Get("Docker-Distribution-Api-Version")
        pr.scheme = scheme
        return pr, nil
        c.challenges = parseAuthHeader(resp.Header)
        c.scheme = scheme
        c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1"
        return nil
    }
    pr, err := ping("https")
    err := ping("https")
    if err != nil && c.ctx != nil && c.ctx.DockerInsecureSkipTLSVerify {
        pr, err = ping("http")
        err = ping("http")
    }
    if err != nil {
        err = fmt.Errorf("pinging docker registry returned %+v", err)
        if c.ctx.DockerDisableV1Ping {
            return nil, err
        err = errors.Wrap(err, "pinging docker registry returned")
        if c.ctx != nil && c.ctx.DockerDisableV1Ping {
            return err
        }
        // best effort to understand if we're talking to a V1 registry
        pingV1 := func(scheme string) bool {
            url := fmt.Sprintf(baseURLV1, scheme, c.registry)
            url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
            resp, err := c.makeRequestToResolvedURL("GET", url, nil, nil, -1, true)
            logrus.Debugf("Ping %s err %#v", url, err)
            if err != nil {
                return false
            }
            defer resp.Body.Close()
            logrus.Debugf("Ping %s status %d", scheme+"://"+c.registry+"/v1/_ping", resp.StatusCode)
            logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
            if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
                return false
            }
@@ -454,7 +477,31 @@ func (c *dockerClient) ping() (*pingResponse, error) {
            err = ErrV1NotSupported
        }
    }
    return pr, err
    return err
}
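The probing logic above tries https first and falls back to http only when DockerInsecureSkipTLSVerify allows it, treating both 200 (open registry) and 401 (auth required) as "v2 is present". The same idea in a standalone, runnable form; this goes straight through net/http rather than makeRequestToResolvedURL and the client's TLS configuration:

package main

import (
    "fmt"
    "net/http"
)

// pingV2 probes a registry's /v2/ endpoint, https first, http on request.
func pingV2(registry string, allowHTTP bool) (string, error) {
    try := func(scheme string) error {
        resp, err := http.Get(fmt.Sprintf("%s://%s/v2/", scheme, registry))
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        // 200 (open registry) and 401 (auth required) both mean "v2 is there".
        if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
            return fmt.Errorf("unexpected status %d", resp.StatusCode)
        }
        return nil
    }
    if err := try("https"); err == nil {
        return "https", nil
    } else if !allowHTTP {
        return "", err
    }
    if err := try("http"); err != nil {
        return "", err
    }
    return "http", nil
}

func main() {
    scheme, err := pingV2("registry-1.docker.io", false)
    fmt.Println(scheme, err)
}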

// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
// using the original data structures.
func (c *dockerClient) getExtensionsSignatures(ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
    path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
    res, err := c.makeRequest("GET", path, nil, nil)
    if err != nil {
        return nil, err
    }
    defer res.Body.Close()
    if res.StatusCode != http.StatusOK {
        return nil, client.HandleErrorResponse(res)
    }
    body, err := ioutil.ReadAll(res.Body)
    if err != nil {
        return nil, err
    }

    var parsedBody extensionSignatureList
    if err := json.Unmarshal(body, &parsedBody); err != nil {
        return nil, errors.Wrapf(err, "Error decoding signature list")
    }
    return &parsedBody, nil
}

func getDefaultConfigDir(confPath string) string {
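The extensionSignatureList and extensionSignature types are consumed here and in putSignaturesToAPIExtension further down, but their declarations are not part of this diff. Plausible shapes, with field names taken from how the surrounding code uses them; the JSON tags and constant values are assumptions, not copied from the vendored source:

package docker

const (
    extensionSignatureSchemaVersion = 2        // assumed value
    extensionSignatureTypeAtomic    = "atomic" // assumed value
)

type extensionSignature struct {
    Version int    `json:"schemaVersion"` // assumed tag
    Name    string `json:"name"`          // assumed tag
    Type    string `json:"type"`          // assumed tag
    Content []byte `json:"content"`       // assumed tag
}

type extensionSignatureList struct {
    Signatures []extensionSignature `json:"signatures"` // assumed tag
}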
10 vendor/github.com/containers/image/docker/docker_image.go (generated, vendored)
@@ -5,8 +5,10 @@ import (
    "fmt"
    "net/http"

    "github.com/containers/image/docker/reference"
    "github.com/containers/image/image"
    "github.com/containers/image/types"
    "github.com/pkg/errors"
)

// Image is a Docker-specific implementation of types.Image with a few extra methods
@@ -33,20 +35,20 @@ func newImage(ctx *types.SystemContext, ref dockerReference) (types.Image, error

// SourceRefFullName returns a fully expanded name for the repository this image is in.
func (i *Image) SourceRefFullName() string {
    return i.src.ref.ref.FullName()
    return i.src.ref.ref.Name()
}

// GetRepositoryTags lists all tags available in the repository. Note that this has no connection with the tag(s) used for this specific image, if any.
func (i *Image) GetRepositoryTags() ([]string, error) {
    url := fmt.Sprintf(tagsURL, i.src.ref.ref.RemoteName())
    res, err := i.src.c.makeRequest("GET", url, nil, nil)
    path := fmt.Sprintf(tagsPath, reference.Path(i.src.ref.ref))
    res, err := i.src.c.makeRequest("GET", path, nil, nil)
    if err != nil {
        return nil, err
    }
    defer res.Body.Close()
    if res.StatusCode != http.StatusOK {
        // print url also
        return nil, fmt.Errorf("Invalid status code returned when fetching tags list %d", res.StatusCode)
        return nil, errors.Errorf("Invalid status code returned when fetching tags list %d", res.StatusCode)
    }
    type tagsRes struct {
        Tags []string
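The wire format behind GetRepositoryTags is the Registry HTTP API v2 tags endpoint, which returns JSON like {"name":"library/busybox","tags":["latest"]}. A standalone sketch of the same fetch; this uses anonymous access only, whereas the vendored code routes through makeRequest and the client's auth, so public registries that demand a bearer token will answer 401 here:

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
)

// fetchTags lists the tags of a repository via GET /v2/<name>/tags/list.
func fetchTags(registry, repo string) ([]string, error) {
    resp, err := http.Get(fmt.Sprintf("https://%s/v2/%s/tags/list", registry, repo))
    if err != nil {
        return nil, err
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        return nil, fmt.Errorf("unexpected status %d", resp.StatusCode)
    }
    var body struct {
        Tags []string `json:"tags"`
    }
    if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
        return nil, err
    }
    return body.Tags, nil
}

func main() {
    tags, err := fetchTags("quay.io", "coreos/etcd") // sample repo, chosen for illustration
    fmt.Println(tags, err)
}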
244 vendor/github.com/containers/image/docker/docker_image_dest.go (generated, vendored)
@@ -2,6 +2,8 @@ package docker

import (
    "bytes"
    "crypto/rand"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
@@ -11,11 +13,28 @@ import (
    "path/filepath"

    "github.com/Sirupsen/logrus"
    "github.com/containers/image/docker/reference"
    "github.com/containers/image/manifest"
    "github.com/containers/image/types"
    "github.com/docker/distribution/digest"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
)

var manifestMIMETypes = []string{
    // TODO(runcom): we'll add OCI as part of another PR here
    manifest.DockerV2Schema2MediaType,
    manifest.DockerV2Schema1SignedMediaType,
    manifest.DockerV2Schema1MediaType,
}

func supportedManifestMIMETypesMap() map[string]bool {
    m := make(map[string]bool, len(manifestMIMETypes))
    for _, mt := range manifestMIMETypes {
        m[mt] = true
    }
    return m
}

type dockerImageDestination struct {
    ref dockerReference
    c *dockerClient
@@ -25,7 +44,7 @@ type dockerImageDestination struct {

// newImageDestination creates a new ImageDestination for the specified image reference.
func newImageDestination(ctx *types.SystemContext, ref dockerReference) (types.ImageDestination, error) {
    c, err := newDockerClient(ctx, ref, true)
    c, err := newDockerClient(ctx, ref, true, "pull,push")
    if err != nil {
        return nil, err
    }
@@ -42,22 +61,28 @@ func (d *dockerImageDestination) Reference() types.ImageReference {
}

// Close removes resources associated with an initialized ImageDestination, if any.
func (d *dockerImageDestination) Close() {
func (d *dockerImageDestination) Close() error {
    return nil
}

func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
    return []string{
        // TODO(runcom): we'll add OCI as part of another PR here
        manifest.DockerV2Schema2MediaType,
        manifest.DockerV2Schema1SignedMediaType,
        manifest.DockerV2Schema1MediaType,
    }
    return manifestMIMETypes
}

// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *dockerImageDestination) SupportsSignatures() error {
    return fmt.Errorf("Pushing signatures to a Docker Registry is not supported")
    if err := d.c.detectProperties(); err != nil {
        return err
    }
    switch {
    case d.c.signatureBase != nil:
        return nil
    case d.c.supportsSignatures:
        return nil
    default:
        return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
    }
}

// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
@@ -87,52 +112,38 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
    if inputInfo.Digest.String() != "" {
        checkURL := fmt.Sprintf(blobsURL, d.ref.ref.RemoteName(), inputInfo.Digest.String())

        logrus.Debugf("Checking %s", checkURL)
        res, err := d.c.makeRequest("HEAD", checkURL, nil, nil)
        haveBlob, size, err := d.HasBlob(inputInfo)
        if err != nil {
            return types.BlobInfo{}, err
        }
        defer res.Body.Close()
        switch res.StatusCode {
        case http.StatusOK:
            logrus.Debugf("... already exists, not uploading")
            return types.BlobInfo{Digest: inputInfo.Digest, Size: getBlobSize(res)}, nil
        case http.StatusUnauthorized:
            logrus.Debugf("... not authorized")
            return types.BlobInfo{}, fmt.Errorf("not authorized to read from destination repository %s", d.ref.ref.RemoteName())
        case http.StatusNotFound:
            // noop
        default:
            return types.BlobInfo{}, fmt.Errorf("failed to read from destination repository %s: %v", d.ref.ref.RemoteName(), http.StatusText(res.StatusCode))
        if haveBlob {
            return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
        }
        logrus.Debugf("... failed, status %d", res.StatusCode)
    }

    // FIXME? Chunked upload, progress reporting, etc.
    uploadURL := fmt.Sprintf(blobUploadURL, d.ref.ref.RemoteName())
    logrus.Debugf("Uploading %s", uploadURL)
    res, err := d.c.makeRequest("POST", uploadURL, nil, nil)
    uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
    logrus.Debugf("Uploading %s", uploadPath)
    res, err := d.c.makeRequest("POST", uploadPath, nil, nil)
    if err != nil {
        return types.BlobInfo{}, err
    }
    defer res.Body.Close()
    if res.StatusCode != http.StatusAccepted {
        logrus.Debugf("Error initiating layer upload, response %#v", *res)
        return types.BlobInfo{}, fmt.Errorf("Error initiating layer upload to %s, status %d", uploadURL, res.StatusCode)
        return types.BlobInfo{}, errors.Errorf("Error initiating layer upload to %s, status %d", uploadPath, res.StatusCode)
    }
    uploadLocation, err := res.Location()
    if err != nil {
        return types.BlobInfo{}, fmt.Errorf("Error determining upload URL: %s", err.Error())
        return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
    }

    digester := digest.Canonical.New()
    digester := digest.Canonical.Digester()
    sizeCounter := &sizeCounter{}
    tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
    res, err = d.c.makeRequestToResolvedURL("PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, true)
    if err != nil {
        logrus.Debugf("Error uploading layer chunked, response %#v", *res)
        logrus.Debugf("Error uploading layer chunked, response %#v", res)
        return types.BlobInfo{}, err
    }
    defer res.Body.Close()
@@ -140,7 +151,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI

    uploadLocation, err = res.Location()
    if err != nil {
        return types.BlobInfo{}, fmt.Errorf("Error determining upload URL: %s", err.Error())
        return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
    }

    // FIXME: DELETE uploadLocation on failure
@@ -156,13 +167,48 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
    defer res.Body.Close()
    if res.StatusCode != http.StatusCreated {
        logrus.Debugf("Error uploading layer, response %#v", *res)
        return types.BlobInfo{}, fmt.Errorf("Error uploading layer to %s, status %d", uploadLocation, res.StatusCode)
        return types.BlobInfo{}, errors.Errorf("Error uploading layer to %s, status %d", uploadLocation, res.StatusCode)
    }

    logrus.Debugf("Upload of layer %s complete", computedDigest)
    return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
}
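The PATCH upload above streams the blob while simultaneously computing its digest and size, by teeing every byte read from the input into a hash and a byte counter. The same pattern in isolation, using only the standard library (writerFunc here plays the role of the sizeCounter type):

package main

import (
    "bytes"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "strings"
)

// writerFunc adapts a function to io.Writer.
type writerFunc func(p []byte) (int, error)

func (f writerFunc) Write(p []byte) (int, error) { return f(p) }

// digestWhileCopying copies src to dst while hashing and counting the bytes:
// io.TeeReader feeds everything it reads into the io.MultiWriter as well.
func digestWhileCopying(dst io.Writer, src io.Reader) (string, int64, error) {
    h := sha256.New()
    var size int64
    counter := writerFunc(func(p []byte) (int, error) {
        size += int64(len(p))
        return len(p), nil
    })
    tee := io.TeeReader(src, io.MultiWriter(h, counter))
    if _, err := io.Copy(dst, tee); err != nil {
        return "", 0, err
    }
    return "sha256:" + hex.EncodeToString(h.Sum(nil)), size, nil
}

func main() {
    var sink bytes.Buffer
    d, n, err := digestWhileCopying(&sink, strings.NewReader("layer data"))
    fmt.Println(d, n, err)
}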

// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
    if info.Digest == "" {
        return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`)
    }
    checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())

    logrus.Debugf("Checking %s", checkPath)
    res, err := d.c.makeRequest("HEAD", checkPath, nil, nil)
    if err != nil {
        return false, -1, err
    }
    defer res.Body.Close()
    switch res.StatusCode {
    case http.StatusOK:
        logrus.Debugf("... already exists")
        return true, getBlobSize(res), nil
    case http.StatusUnauthorized:
        logrus.Debugf("... not authorized")
        return false, -1, errors.Errorf("not authorized to read from destination repository %s", reference.Path(d.ref.ref))
    case http.StatusNotFound:
        logrus.Debugf("... not present")
        return false, -1, nil
    default:
        return false, -1, errors.Errorf("failed to read from destination repository %s: %v", reference.Path(d.ref.ref), http.StatusText(res.StatusCode))
    }
}

func (d *dockerImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
    return info, nil
}

func (d *dockerImageDestination) PutManifest(m []byte) error {
    digest, err := manifest.Digest(m)
    if err != nil {
@@ -170,18 +216,18 @@ func (d *dockerImageDestination) PutManifest(m []byte) error {
    }
    d.manifestDigest = digest

    reference, err := d.ref.tagOrDigest()
    refTail, err := d.ref.tagOrDigest()
    if err != nil {
        return err
    }
    url := fmt.Sprintf(manifestURL, d.ref.ref.RemoteName(), reference)
    path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)

    headers := map[string][]string{}
    mimeType := manifest.GuessMIMEType(m)
    if mimeType != "" {
        headers["Content-Type"] = []string{mimeType}
    }
    res, err := d.c.makeRequest("PUT", url, headers, bytes.NewReader(m))
    res, err := d.c.makeRequest("PUT", path, headers, bytes.NewReader(m))
    if err != nil {
        return err
    }
@@ -192,12 +238,32 @@ func (d *dockerImageDestination) PutManifest(m []byte) error {
        logrus.Debugf("Error body %s", string(body))
    }
    logrus.Debugf("Error uploading manifest, status %d, %#v", res.StatusCode, res)
    return fmt.Errorf("Error uploading manifest to %s, status %d", url, res.StatusCode)
    return errors.Errorf("Error uploading manifest to %s, status %d", path, res.StatusCode)
    }
    return nil
}

func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
    // Do not fail if we don’t really need to support signatures.
    if len(signatures) == 0 {
        return nil
    }
    if err := d.c.detectProperties(); err != nil {
        return err
    }
    switch {
    case d.c.signatureBase != nil:
        return d.putSignaturesToLookaside(signatures)
    case d.c.supportsSignatures:
        return d.putSignaturesToAPIExtension(signatures)
    default:
        return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
    }
}

// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase,
// which is not nil.
func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) error {
    // FIXME? This overwrites files one at a time, definitely not atomic.
    // A failure when updating signatures with a reordered copy could lose some of them.

@@ -205,19 +271,17 @@ func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
    if len(signatures) == 0 {
        return nil
    }
    if d.c.signatureBase == nil {
        return fmt.Errorf("Pushing signatures to a Docker Registry is not supported, and there is no applicable signature storage configured")
    }

    // FIXME: This assumption that signatures are stored after the manifest rather breaks the model.
    if d.manifestDigest.String() == "" {
        return fmt.Errorf("Unknown manifest digest, can't add signatures")
        // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
        return errors.Errorf("Unknown manifest digest, can't add signatures")
    }

    // NOTE: Keep this in sync with docs/signature-protocols.md!
    for i, signature := range signatures {
        url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
        if url == nil {
            return fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
            return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
        }
        err := d.putOneSignature(url, signature)
        if err != nil {
@@ -232,7 +296,7 @@ func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
    for i := len(signatures); ; i++ {
        url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
        if url == nil {
            return fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
            return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
        }
        missing, err := d.c.deleteOneSignature(url)
        if err != nil {
@@ -247,6 +311,7 @@ func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
}

// putOneSignature stores one signature to url.
// NOTE: Keep this in sync with docs/signature-protocols.md!
func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error {
    switch url.Scheme {
    case "file":
@@ -262,14 +327,15 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte)
        return nil

    case "http", "https":
        return fmt.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
        return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
    default:
        return fmt.Errorf("Unsupported scheme when writing signature to %s", url.String())
        return errors.Errorf("Unsupported scheme when writing signature to %s", url.String())
    }
}

// deleteOneSignature deletes a signature from url, if it exists.
// If it successfully determines that the signature does not exist, returns (true, nil)
// NOTE: Keep this in sync with docs/signature-protocols.md!
func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) {
    switch url.Scheme {
    case "file":
@@ -281,12 +347,88 @@ func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error
        return false, err

    case "http", "https":
        return false, fmt.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
        return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
    default:
        return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", url.String())
        return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String())
    }
}

// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension.
func (d *dockerImageDestination) putSignaturesToAPIExtension(signatures [][]byte) error {
    // Skip dealing with the manifest digest, or reading the old state, if not necessary.
    if len(signatures) == 0 {
        return nil
    }

    if d.manifestDigest.String() == "" {
        // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
        return errors.Errorf("Unknown manifest digest, can't add signatures")
    }

    // Because image signatures are a shared resource in Atomic Registry, the default upload
    // always adds signatures. Eventually we should also allow removing signatures,
    // but the X-Registry-Supports-Signatures API extension does not support that yet.

    existingSignatures, err := d.c.getExtensionsSignatures(d.ref, d.manifestDigest)
    if err != nil {
        return err
    }
    existingSigNames := map[string]struct{}{}
    for _, sig := range existingSignatures.Signatures {
        existingSigNames[sig.Name] = struct{}{}
    }

sigExists:
    for _, newSig := range signatures {
        for _, existingSig := range existingSignatures.Signatures {
            if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
                continue sigExists
            }
        }

        // The API expects us to invent a new unique name. This is racy, but hopefully good enough.
        var signatureName string
        for {
            randBytes := make([]byte, 16)
            n, err := rand.Read(randBytes)
            if err != nil || n != 16 {
                return errors.Wrapf(err, "Error generating random signature len %d", n)
            }
            signatureName = fmt.Sprintf("%s@%032x", d.manifestDigest.String(), randBytes)
            if _, ok := existingSigNames[signatureName]; !ok {
                break
            }
        }
        sig := extensionSignature{
            Version: extensionSignatureSchemaVersion,
            Name: signatureName,
            Type: extensionSignatureTypeAtomic,
            Content: newSig,
        }
        body, err := json.Marshal(sig)
        if err != nil {
            return err
        }

        path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
        res, err := d.c.makeRequest("PUT", path, nil, bytes.NewReader(body))
        if err != nil {
            return err
        }
        defer res.Body.Close()
        if res.StatusCode != http.StatusCreated {
            body, err := ioutil.ReadAll(res.Body)
            if err == nil {
                logrus.Debugf("Error body %s", string(body))
            }
            logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
            return errors.Errorf("Error uploading signature to %s, status %d", path, res.StatusCode)
        }
    }

    return nil
}

// Commit marks the process of storing the image as successful and asks for the image to be persisted.
// WARNING: This does not have any transactional semantics:
// - Uploaded data MAY be visible to others before Commit() is called
100 vendor/github.com/containers/image/docker/docker_image_src.go (generated, vendored)
@@ -11,10 +11,12 @@ import (
    "strconv"

    "github.com/Sirupsen/logrus"
    "github.com/containers/image/docker/reference"
    "github.com/containers/image/manifest"
    "github.com/containers/image/types"
    "github.com/docker/distribution/digest"
    "github.com/docker/distribution/registry/client"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
)

type dockerImageSource struct {
@@ -31,13 +33,24 @@ type dockerImageSource struct {
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
// The caller must call .Close() on the returned ImageSource.
func newImageSource(ctx *types.SystemContext, ref dockerReference, requestedManifestMIMETypes []string) (*dockerImageSource, error) {
    c, err := newDockerClient(ctx, ref, false)
    c, err := newDockerClient(ctx, ref, false, "pull")
    if err != nil {
        return nil, err
    }
    if requestedManifestMIMETypes == nil {
        requestedManifestMIMETypes = manifest.DefaultRequestedManifestMIMETypes
    }
    supportedMIMEs := supportedManifestMIMETypesMap()
    acceptableRequestedMIMEs := false
    for _, mtrequested := range requestedManifestMIMETypes {
        if supportedMIMEs[mtrequested] {
            acceptableRequestedMIMEs = true
            break
        }
    }
    if !acceptableRequestedMIMEs {
        requestedManifestMIMETypes = manifest.DefaultRequestedManifestMIMETypes
    }
    return &dockerImageSource{
        ref: ref,
        requestedManifestMIMETypes: requestedManifestMIMETypes,
@@ -52,7 +65,8 @@ func (s *dockerImageSource) Reference() types.ImageReference {
}

// Close removes resources associated with an initialized ImageSource, if any.
func (s *dockerImageSource) Close() {
func (s *dockerImageSource) Close() error {
    return nil
}

// simplifyContentType drops parameters from an HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
@@ -79,10 +93,10 @@ func (s *dockerImageSource) GetManifest() ([]byte, string, error) {
}

func (s *dockerImageSource) fetchManifest(tagOrDigest string) ([]byte, string, error) {
    url := fmt.Sprintf(manifestURL, s.ref.ref.RemoteName(), tagOrDigest)
    path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest)
    headers := make(map[string][]string)
    headers["Accept"] = s.requestedManifestMIMETypes
    res, err := s.c.makeRequest("GET", url, headers, nil)
    res, err := s.c.makeRequest("GET", path, headers, nil)
    if err != nil {
        return nil, "", err
    }
@@ -139,7 +153,7 @@ func (s *dockerImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64
    resp, err = s.c.makeRequestToResolvedURL("GET", url, nil, nil, -1, false)
    if err == nil {
        if resp.StatusCode != http.StatusOK {
            err = fmt.Errorf("error fetching external blob from %q: %d", url, resp.StatusCode)
            err = errors.Errorf("error fetching external blob from %q: %d", url, resp.StatusCode)
            logrus.Debug(err)
            continue
        }
@@ -165,24 +179,36 @@ func (s *dockerImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64,
        return s.getExternalBlob(info.URLs)
    }

    url := fmt.Sprintf(blobsURL, s.ref.ref.RemoteName(), info.Digest.String())
    logrus.Debugf("Downloading %s", url)
    res, err := s.c.makeRequest("GET", url, nil, nil)
    path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String())
    logrus.Debugf("Downloading %s", path)
    res, err := s.c.makeRequest("GET", path, nil, nil)
    if err != nil {
        return nil, 0, err
    }
    if res.StatusCode != http.StatusOK {
        // print url also
        return nil, 0, fmt.Errorf("Invalid status code returned when fetching blob %d", res.StatusCode)
        return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d", res.StatusCode)
    }
    return res.Body, getBlobSize(res), nil
}
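getBlobSize is used in several places above but is not part of this diff. Presumably it derives the size from the response's Content-Length header, returning -1 when unknown; a stand-in under that assumption:

package docker

import (
    "net/http"
    "strconv"
)

// blobSize reads Content-Length, returning -1 when it is absent or malformed.
// The vendored getBlobSize is assumed to behave like this; it is not shown here.
func blobSize(res *http.Response) int64 {
    size, err := strconv.ParseInt(res.Header.Get("Content-Length"), 10, 64)
    if err != nil {
        return -1 // size unknown
    }
    return size
}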

func (s *dockerImageSource) GetSignatures() ([][]byte, error) {
    if s.c.signatureBase == nil { // Skip dealing with the manifest digest if not necessary.
    if err := s.c.detectProperties(); err != nil {
        return nil, err
    }
    switch {
    case s.c.signatureBase != nil:
        return s.getSignaturesFromLookaside()
    case s.c.supportsSignatures:
        return s.getSignaturesFromAPIExtension()
    default:
        return [][]byte{}, nil
    }
}

// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase,
// which is not nil.
func (s *dockerImageSource) getSignaturesFromLookaside() ([][]byte, error) {
    if err := s.ensureManifestIsLoaded(); err != nil {
        return nil, err
    }
@@ -191,11 +217,12 @@ func (s *dockerImageSource) GetSignatures() ([][]byte, error) {
        return nil, err
    }

    // NOTE: Keep this in sync with docs/signature-protocols.md!
    signatures := [][]byte{}
    for i := 0; ; i++ {
        url := signatureStorageURL(s.c.signatureBase, manifestDigest, i)
        if url == nil {
            return nil, fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
            return nil, errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
        }
        signature, missing, err := s.getOneSignature(url)
        if err != nil {
@@ -211,6 +238,7 @@ func (s *dockerImageSource) GetSignatures() ([][]byte, error) {

// getOneSignature downloads one signature from url.
// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil.
// NOTE: Keep this in sync with docs/signature-protocols.md!
func (s *dockerImageSource) getOneSignature(url *url.URL) (signature []byte, missing bool, err error) {
    switch url.Scheme {
    case "file":
@@ -234,7 +262,7 @@ func (s *dockerImageSource) getOneSignature(url *url.URL) (signature []byte, mis
        if res.StatusCode == http.StatusNotFound {
            return nil, true, nil
        } else if res.StatusCode != http.StatusOK {
            return nil, false, fmt.Errorf("Error reading signature from %s: status %d", url.String(), res.StatusCode)
            return nil, false, errors.Errorf("Error reading signature from %s: status %d", url.String(), res.StatusCode)
        }
        sig, err := ioutil.ReadAll(res.Body)
        if err != nil {
@@ -243,13 +271,37 @@ func (s *dockerImageSource) getOneSignature(url *url.URL) (signature []byte, mis
        return sig, false, nil

    default:
        return nil, false, fmt.Errorf("Unsupported scheme when reading signature from %s", url.String())
        return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String())
    }
}

// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension.
func (s *dockerImageSource) getSignaturesFromAPIExtension() ([][]byte, error) {
    if err := s.ensureManifestIsLoaded(); err != nil {
        return nil, err
    }
    manifestDigest, err := manifest.Digest(s.cachedManifest)
    if err != nil {
        return nil, err
    }

    parsedBody, err := s.c.getExtensionsSignatures(s.ref, manifestDigest)
    if err != nil {
        return nil, err
    }

    var sigs [][]byte
    for _, sig := range parsedBody.Signatures {
        if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic {
            sigs = append(sigs, sig.Content)
        }
    }
    return sigs, nil
}

// deleteImage deletes the named image from the registry, if supported.
func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
    c, err := newDockerClient(ctx, ref, true)
    c, err := newDockerClient(ctx, ref, true, "push")
    if err != nil {
        return err
    }
@@ -259,12 +311,12 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
    headers := make(map[string][]string)
    headers["Accept"] = []string{manifest.DockerV2Schema2MediaType}

    reference, err := ref.tagOrDigest()
    refTail, err := ref.tagOrDigest()
    if err != nil {
        return err
    }
    getURL := fmt.Sprintf(manifestURL, ref.ref.RemoteName(), reference)
    get, err := c.makeRequest("GET", getURL, headers, nil)
    getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
    get, err := c.makeRequest("GET", getPath, headers, nil)
    if err != nil {
        return err
    }
@@ -276,17 +328,17 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
    switch get.StatusCode {
    case http.StatusOK:
    case http.StatusNotFound:
        return fmt.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
        return errors.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
    default:
        return fmt.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
        return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
    }

    digest := get.Header.Get("Docker-Content-Digest")
    deleteURL := fmt.Sprintf(manifestURL, ref.ref.RemoteName(), digest)
    deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest)

    // When retrieving the digest from a registry >= 2.3 use the following header:
    // "Accept": "application/vnd.docker.distribution.manifest.v2+json"
    delete, err := c.makeRequest("DELETE", deleteURL, headers, nil)
    delete, err := c.makeRequest("DELETE", deletePath, headers, nil)
    if err != nil {
        return err
    }
@@ -297,7 +349,7 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
        return err
    }
    if delete.StatusCode != http.StatusAccepted {
        return fmt.Errorf("Failed to delete %v: %s (%v)", deleteURL, string(body), delete.Status)
        return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status)
    }

    if c.signatureBase != nil {
@@ -309,7 +361,7 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
        for i := 0; ; i++ {
            url := signatureStorageURL(c.signatureBase, manifestDigest, i)
            if url == nil {
                return fmt.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
                return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
            }
            missing, err := c.deleteOneSignature(url)
            if err != nil {
23 vendor/github.com/containers/image/docker/docker_transport.go (generated, vendored)
@@ -6,9 +6,15 @@ import (

    "github.com/containers/image/docker/policyconfiguration"
    "github.com/containers/image/docker/reference"
    "github.com/containers/image/transports"
    "github.com/containers/image/types"
    "github.com/pkg/errors"
)

func init() {
    transports.Register(Transport)
}

// Transport is an ImageTransport for Docker registry-hosted images.
var Transport = dockerTransport{}

@@ -42,29 +48,30 @@ type dockerReference struct {
// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
func ParseReference(refString string) (types.ImageReference, error) {
    if !strings.HasPrefix(refString, "//") {
        return nil, fmt.Errorf("docker: image reference %s does not start with //", refString)
        return nil, errors.Errorf("docker: image reference %s does not start with //", refString)
    }
    ref, err := reference.ParseNamed(strings.TrimPrefix(refString, "//"))
    ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
    if err != nil {
        return nil, err
    }
    ref = reference.WithDefaultTag(ref)
    ref = reference.TagNameOnly(ref)
    return NewReference(ref)
}

// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
func NewReference(ref reference.Named) (types.ImageReference, error) {
    if reference.IsNameOnly(ref) {
        return nil, fmt.Errorf("Docker reference %s has neither a tag nor a digest", ref.String())
        return nil, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
    }
    // A github.com/distribution/reference value can have a tag and a digest at the same time!
    // docker/reference does not handle that, so fail.
    // The docker/distribution API does not really support that (we can’t ask for an image with a specific
    // tag and digest), so fail. This MAY be accepted in the future.
    // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop
    // the tag or the digest first?)
    _, isTagged := ref.(reference.NamedTagged)
    _, isDigested := ref.(reference.Canonical)
    if isTagged && isDigested {
        return nil, fmt.Errorf("Docker references with both a tag and digest are currently not supported")
        return nil, errors.Errorf("Docker references with both a tag and digest are currently not supported")
    }
    return dockerReference{
        ref: ref,
@@ -81,7 +88,7 @@ func (ref dockerReference) Transport() types.ImageTransport {
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref dockerReference) StringWithinTransport() string {
    return "//" + ref.ref.String()
    return "//" + reference.FamiliarString(ref.ref)
}

// DockerReference returns a Docker reference associated with this reference
@@ -151,5 +158,5 @@ func (ref dockerReference) tagOrDigest() (string, error) {
        return ref.Tag(), nil
    }
    // This should not happen, NewReference above refuses reference.IsNameOnly values.
    return "", fmt.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", ref.ref.String())
    return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref))
}
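The observable effect of the ParseNormalizedNamed plus TagNameOnly switch above, by example; the expected shapes are derived from the normalization rules in the new reference package later in this diff, not captured from a test run:

// ParseReference("//busybox")
//     name normalizes to docker.io/library/busybox, tag defaults to :latest
// ParseReference("//quay.io/ns/repo@sha256:<hex>")
//     digested reference, accepted as-is
// ParseReference("//busybox:1.36@sha256:<hex>")
//     rejected: a tag and a digest together are currently not supported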
25 vendor/github.com/containers/image/docker/lookaside.go (generated, vendored)
@@ -9,11 +9,12 @@ import (
    "path/filepath"
    "strings"

    "github.com/docker/distribution/digest"
    "github.com/ghodss/yaml"

    "github.com/Sirupsen/logrus"
    "github.com/containers/image/docker/reference"
    "github.com/containers/image/types"
    "github.com/ghodss/yaml"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
)

// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.
@@ -60,12 +61,13 @@ func configuredSignatureStorageBase(ctx *types.SystemContext, ref dockerReferenc

    url, err := url.Parse(topLevel)
    if err != nil {
        return nil, fmt.Errorf("Invalid signature storage URL %s: %v", topLevel, err)
        return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel)
    }
    // NOTE: Keep this in sync with docs/signature-protocols.md!
    // FIXME? Restrict to explicitly supported schemes?
    repo := ref.ref.FullName() // Note that this is without a tag or digest.
    if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
        return nil, fmt.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String())
    repo := reference.Path(ref.ref) // Note that this is without a tag or digest.
    if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
        return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String())
    }
    url.Path = url.Path + "/" + repo
    return url, nil
@@ -114,12 +116,12 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
    var config registryConfiguration
    err = yaml.Unmarshal(configBytes, &config)
    if err != nil {
        return nil, fmt.Errorf("Error parsing %s: %v", configPath, err)
        return nil, errors.Wrapf(err, "Error parsing %s", configPath)
    }

    if config.DefaultDocker != nil {
        if mergedConfig.DefaultDocker != nil {
            return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
            return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
                dockerDefaultMergedFrom, configPath)
        }
        mergedConfig.DefaultDocker = config.DefaultDocker
@@ -128,7 +130,7 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {

    for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
        if _, ok := mergedConfig.Docker[nsName]; ok {
            return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
            return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
                nsName, nsMergedFrom[nsName], configPath)
        }
        mergedConfig.Docker[nsName] = nsConfig
@@ -189,11 +191,12 @@ func (ns registryNamespace) signatureTopLevel(write bool) string {

// signatureStorageURL returns a URL usable for accessing the signature index in base with known manifestDigest, or nil if not applicable.
// Returns nil iff base == nil.
// NOTE: Keep this in sync with docs/signature-protocols.md!
func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL {
    if base == nil {
        return nil
    }
    url := *base
    url.Path = fmt.Sprintf("%s@%s/signature-%d", url.Path, manifestDigest.String(), index+1)
    url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
    return &url
}
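A worked example of the new lookaside layout implied by the "%s@%s=%s/signature-%d" format string, with sample values chosen for illustration:

// For a configured base of file:///var/lib/atomic/sigstore, repository
// library/busybox, and manifest digest sha256:abcd..., signatures are read from
//
//   /var/lib/atomic/sigstore/library/busybox@sha256=abcd.../signature-1
//   /var/lib/atomic/sigstore/library/busybox@sha256=abcd.../signature-2
//
// i.e. the digest's ":" becomes "=" in the path, and the index is 1-based.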
13 vendor/github.com/containers/image/docker/policyconfiguration/naming.go (generated, vendored)
@@ -1,25 +1,24 @@
package policyconfiguration

import (
    "errors"
    "fmt"
    "strings"

    "github.com/containers/image/docker/reference"
    "github.com/pkg/errors"
)

// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup,
// as a backend for ImageReference.PolicyConfigurationIdentity.
// The reference must satisfy !reference.IsNameOnly().
func DockerReferenceIdentity(ref reference.Named) (string, error) {
    res := ref.FullName()
    res := ref.Name()
    tagged, isTagged := ref.(reference.NamedTagged)
    digested, isDigested := ref.(reference.Canonical)
    switch {
    case isTagged && isDigested: // This should not happen, docker/reference.ParseNamed drops the tag.
        return "", fmt.Errorf("Unexpected Docker reference %s with both a name and a digest", ref.String())
    case isTagged && isDigested: // Note that this CAN actually happen.
        return "", errors.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref))
    case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly()
        return "", fmt.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", ref.String())
        return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
    case isTagged:
        res = res + ":" + tagged.Tag()
    case isDigested:
@@ -43,7 +42,7 @@ func DockerReferenceNamespaces(ref reference.Named) []string {
    // ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last
    // iteration matches the host name (for any namespace).
    res := []string{}
    name := ref.FullName()
    name := ref.Name()
    for {
        res = append(res, name)
2 vendor/github.com/containers/image/docker/reference/README.md (generated, vendored, new file)
@@ -0,0 +1,2 @@
This is a copy of github.com/docker/distribution/reference as of commit fb0bebc4b64e3881cc52a2478d749845ed76d2a8,
except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset.
6 vendor/github.com/containers/image/docker/reference/doc.go (generated, vendored)
@@ -1,6 +0,0 @@
// Package reference is a fork of the upstream docker/docker/reference package.
// The package is forked because we need consistency especially when storing and
// checking signatures (RH patches break this consistency because they modify
// docker/docker/reference as part of a patch carried in projectatomic/docker).
// The version of this package is v1.12.1 from upstream, update as necessary.
package reference
42 vendor/github.com/containers/image/docker/reference/helpers.go (generated, vendored, new file)
@@ -0,0 +1,42 @@
package reference

import "path"

// IsNameOnly returns true if reference only contains a repo name.
func IsNameOnly(ref Named) bool {
    if _, ok := ref.(NamedTagged); ok {
        return false
    }
    if _, ok := ref.(Canonical); ok {
        return false
    }
    return true
}

// FamiliarName returns the familiar name string
// for the given named, familiarizing if needed.
func FamiliarName(ref Named) string {
    if nn, ok := ref.(normalizedNamed); ok {
        return nn.Familiar().Name()
    }
    return ref.Name()
}

// FamiliarString returns the familiar string representation
// for the given reference, familiarizing if needed.
func FamiliarString(ref Reference) string {
    if nn, ok := ref.(normalizedNamed); ok {
        return nn.Familiar().String()
    }
    return ref.String()
}

// FamiliarMatch reports whether ref matches the specified pattern.
// See https://godoc.org/path#Match for supported patterns.
func FamiliarMatch(pattern string, ref Reference) (bool, error) {
    matched, err := path.Match(pattern, FamiliarString(ref))
    if namedRef, isNamed := ref.(Named); isNamed && !matched {
        matched, _ = path.Match(pattern, FamiliarName(namedRef))
    }
    return matched, err
}
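A hypothetical round trip through these helpers; the results follow the documented normalization rules above, not a captured test run:

// ref, _ := ParseNormalizedNamed("ubuntu")
// ref.String()                // "docker.io/library/ubuntu"
// FamiliarString(ref)         // "ubuntu"
// FamiliarMatch("ubun*", ref) // true, nil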
152 vendor/github.com/containers/image/docker/reference/normalize.go (generated, vendored, new file)
@@ -0,0 +1,152 @@
package reference

import (
    "errors"
    "fmt"
    "strings"

    "github.com/opencontainers/go-digest"
)

var (
    legacyDefaultDomain = "index.docker.io"
    defaultDomain       = "docker.io"
    officialRepoName    = "library"
    defaultTag          = "latest"
)

// normalizedNamed represents a name which has been
// normalized and has a familiar form. A familiar name
// is what is used in Docker UI. An example normalized
// name is "docker.io/library/ubuntu" and corresponding
// familiar name of "ubuntu".
type normalizedNamed interface {
    Named
    Familiar() Named
}

// ParseNormalizedNamed parses a string into a named reference
// transforming a familiar name from Docker UI to a fully
// qualified reference. If the value may be an identifier
// use ParseAnyReference.
func ParseNormalizedNamed(s string) (Named, error) {
    if ok := anchoredIdentifierRegexp.MatchString(s); ok {
        return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
    }
    domain, remainder := splitDockerDomain(s)
    var remoteName string
    if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
        remoteName = remainder[:tagSep]
    } else {
        remoteName = remainder
    }
    if strings.ToLower(remoteName) != remoteName {
        return nil, errors.New("invalid reference format: repository name must be lowercase")
    }

    ref, err := Parse(domain + "/" + remainder)
    if err != nil {
        return nil, err
    }
    named, isNamed := ref.(Named)
    if !isNamed {
        return nil, fmt.Errorf("reference %s has no name", ref.String())
    }
    return named, nil
}

// splitDockerDomain splits a repository name to domain and remotename string.
// If no valid domain is found, the default domain is used. Repository name
// needs to be already validated before.
func splitDockerDomain(name string) (domain, remainder string) {
    i := strings.IndexRune(name, '/')
    if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
        domain, remainder = defaultDomain, name
    } else {
        domain, remainder = name[:i], name[i+1:]
    }
    if domain == legacyDefaultDomain {
        domain = defaultDomain
    }
    if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
        remainder = officialRepoName + "/" + remainder
    }
    return
}
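splitDockerDomain in practice, per the rules above:

// splitDockerDomain("ubuntu")              // "docker.io", "library/ubuntu"
// splitDockerDomain("user/repo")           // "docker.io", "user/repo"
// splitDockerDomain("quay.io/user/repo")   // "quay.io",   "user/repo"
// splitDockerDomain("localhost/repo")      // "localhost", "repo"
// splitDockerDomain("index.docker.io/x/y") // "docker.io", "x/y"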

// familiarizeName returns a shortened version of the name familiar
// to the Docker UI. Familiar names have the default domain
// "docker.io" and "library/" repository prefix removed.
// For example, "docker.io/library/redis" will have the familiar
// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
// Returns a familiarized named only reference.
func familiarizeName(named namedRepository) repository {
    repo := repository{
        domain: named.Domain(),
        path:   named.Path(),
    }

    if repo.domain == defaultDomain {
        repo.domain = ""
        // Handle official repositories which have the pattern "library/<official repo name>"
        if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
            repo.path = split[1]
        }
    }
    return repo
}

func (r reference) Familiar() Named {
    return reference{
        namedRepository: familiarizeName(r.namedRepository),
        tag:             r.tag,
        digest:          r.digest,
    }
}

func (r repository) Familiar() Named {
    return familiarizeName(r)
}

func (t taggedReference) Familiar() Named {
    return taggedReference{
        namedRepository: familiarizeName(t.namedRepository),
        tag:             t.tag,
    }
}

func (c canonicalReference) Familiar() Named {
    return canonicalReference{
        namedRepository: familiarizeName(c.namedRepository),
        digest:          c.digest,
    }
}

// TagNameOnly adds the default tag "latest" to a reference if it only has
// a repo name.
func TagNameOnly(ref Named) Named {
    if IsNameOnly(ref) {
        namedTagged, err := WithTag(ref, defaultTag)
        if err != nil {
            // Default tag must be valid, to create a NamedTagged
            // type with non-validated input the WithTag function
            // should be used instead
            panic(err)
        }
        return namedTagged
    }
    return ref
}

// ParseAnyReference parses a reference string as a possible identifier,
// full digest, or familiar name.
func ParseAnyReference(ref string) (Reference, error) {
    if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
        return digestReference("sha256:" + ref), nil
    }
    if dgst, err := digest.Parse(ref); err == nil {
        return digestReference(dgst), nil
    }

    return ParseNormalizedNamed(ref)
}
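ParseAnyReference dispatch, by example; expected results inferred from the code above:

// ParseAnyReference("<64 hex chars>")  // bare identifier: digestReference("sha256:" + id)
// ParseAnyReference("sha256:<64 hex>") // parseable digest: digestReference
// ParseAnyReference("ubuntu:16.04")    // anything else: ParseNormalizedNamed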
517 vendor/github.com/containers/image/docker/reference/reference.go (generated, vendored)
@@ -1,38 +1,120 @@
// Package reference provides a general type to represent any way of referencing images within the registry.
// Its main purpose is to abstract tags and digests (content-addressable hash).
//
// Grammar
//
// reference := name [ ":" tag ] [ "@" digest ]
// name := [domain '/'] path-component ['/' path-component]*
// domain := domain-component ['.' domain-component]* [':' port-number]
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/
// path-component := alpha-numeric [separator alpha-numeric]*
// alpha-numeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/
//
// tag := /[\w][\w.-]{0,127}/
//
// digest := digest-algorithm ":" digest-hex
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
// digest-algorithm-separator := /[+.-_]/
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
//
// identifier := /[a-f0-9]{64}/
// short-identifier := /[a-f0-9]{6,64}/
package reference

import (
    "errors"
    "fmt"
    "regexp"
    "strings"

    "github.com/docker/distribution/digest"
    distreference "github.com/docker/distribution/reference"
    "github.com/opencontainers/go-digest"
)

const (
    // DefaultTag defines the default tag used when performing images related actions and no tag or digest is specified
    DefaultTag = "latest"
    // DefaultHostname is the default built-in hostname
    DefaultHostname = "docker.io"
    // LegacyDefaultHostname is automatically converted to DefaultHostname
    LegacyDefaultHostname = "index.docker.io"
    // DefaultRepoPrefix is the prefix used for default repositories in default host
    DefaultRepoPrefix = "library/"
    // NameTotalLengthMax is the maximum total number of characters in a repository name.
    NameTotalLengthMax = 255
)

var (
    // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
    ErrReferenceInvalidFormat = errors.New("invalid reference format")

    // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
    ErrTagInvalidFormat = errors.New("invalid tag format")

    // ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
    ErrDigestInvalidFormat = errors.New("invalid digest format")

    // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
    ErrNameContainsUppercase = errors.New("repository name must be lowercase")

    // ErrNameEmpty is returned for empty, invalid repository names.
    ErrNameEmpty = errors.New("repository name must have at least one component")

    // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
    ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)

    // ErrNameNotCanonical is returned when a name is not canonical.
    ErrNameNotCanonical = errors.New("repository name must be canonical")
)

// Reference is an opaque object reference identifier that may include
// modifiers such as a hostname, name, tag, and digest.
type Reference interface {
    // String returns the full reference
    String() string
}

// Field provides a wrapper type for resolving correct reference types when
// working with encoding.
type Field struct {
    reference Reference
}

// AsField wraps a reference in a Field for encoding.
func AsField(reference Reference) Field {
    return Field{reference}
}

// Reference unwraps the reference type from the field to
// return the Reference object. This object should be
// of the appropriate type to further check for different
// reference types.
func (f Field) Reference() Reference {
    return f.reference
}

// MarshalText serializes the field to byte text which
// is the string of the reference.
func (f Field) MarshalText() (p []byte, err error) {
    return []byte(f.reference.String()), nil
}

// UnmarshalText parses text bytes by invoking the
// reference parser to ensure the appropriately
// typed reference object is wrapped by field.
func (f *Field) UnmarshalText(p []byte) error {
    r, err := Parse(string(p))
    if err != nil {
        return err
    }

    f.reference = r
    return nil
}

// Named is an object with a full name
type Named interface {
    // Name returns normalized repository name, like "ubuntu".
    Reference
    Name() string
    // String returns full reference, like "ubuntu@sha256:abcdef..."
    String() string
    // FullName returns full repository name with hostname, like "docker.io/library/ubuntu"
    FullName() string
    // Hostname returns hostname for the reference, like "docker.io"
    Hostname() string
    // RemoteName returns the repository component of the full name, like "library/ubuntu"
    RemoteName() string
}

// Tagged is an object which has a tag
type Tagged interface {
    Reference
    Tag() string
}

// NamedTagged is an object including a name and tag.
@@ -41,180 +123,311 @@ type NamedTagged interface {
    Tag() string
}

// Digested is an object which has a digest
// in which it can be referenced by
type Digested interface {
    Reference
    Digest() digest.Digest
}

// Canonical reference is an object with a fully unique
// name including a name with hostname and digest
// name including a name with domain and digest
type Canonical interface {
    Named
    Digest() digest.Digest
}

// ParseNamed parses s and returns a syntactically valid reference implementing
// the Named interface. The reference must have a name, otherwise an error is
// returned.
// If an error was encountered it is returned, along with a nil Reference.
func ParseNamed(s string) (Named, error) {
    named, err := distreference.ParseNamed(s)
    if err != nil {
        return nil, fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", s)
// namedRepository is a reference to a repository with a name.
// A namedRepository has both domain and path components.
type namedRepository interface {
    Named
    Domain() string
    Path() string
}

// Domain returns the domain part of the Named reference
func Domain(named Named) string {
    if r, ok := named.(namedRepository); ok {
        return r.Domain()
    }
    r, err := WithName(named.Name())
    domain, _ := splitDomain(named.Name())
    return domain
}

// Path returns the name without the domain part of the Named reference
func Path(named Named) (name string) {
    if r, ok := named.(namedRepository); ok {
        return r.Path()
    }
    _, path := splitDomain(named.Name())
    return path
}

func splitDomain(name string) (string, string) {
    match := anchoredNameRegexp.FindStringSubmatch(name)
    if len(match) != 3 {
        return "", name
    }
    return match[1], match[2]
}

// SplitHostname splits a named reference into a
// hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name
// DEPRECATED: Use Domain or Path
func SplitHostname(named Named) (string, string) {
    if r, ok := named.(namedRepository); ok {
        return r.Domain(), r.Path()
    }
    return splitDomain(named.Name())
}

// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: Parse will not handle short digests.
func Parse(s string) (Reference, error) {
    matches := ReferenceRegexp.FindStringSubmatch(s)
    if matches == nil {
        if s == "" {
            return nil, ErrNameEmpty
        }
        if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
            return nil, ErrNameContainsUppercase
        }
        return nil, ErrReferenceInvalidFormat
    }

    if len(matches[1]) > NameTotalLengthMax {
        return nil, ErrNameTooLong
    }

    var repo repository

    nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
    if nameMatch != nil && len(nameMatch) == 3 {
        repo.domain = nameMatch[1]
        repo.path = nameMatch[2]
    } else {
        repo.domain = ""
        repo.path = matches[1]
    }

    ref := reference{
        namedRepository: repo,
        tag:             matches[2],
    }
    if matches[3] != "" {
        var err error
        ref.digest, err = digest.Parse(matches[3])
        if err != nil {
            return nil, err
        }
    }

    r := getBestReferenceType(ref)
    if r == nil {
        return nil, ErrNameEmpty
    }

    return r, nil
}

// ParseNamed parses s and returns a syntactically valid reference implementing
// the Named interface. The reference must have a name and be in the canonical
// form, otherwise an error is returned.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: ParseNamed will not handle short digests.
func ParseNamed(s string) (Named, error) {
    named, err := ParseNormalizedNamed(s)
    if err != nil {
        return nil, err
    }
    if canonical, isCanonical := named.(distreference.Canonical); isCanonical {
        return WithDigest(r, canonical.Digest())
    if named.String() != s {
        return nil, ErrNameNotCanonical
    }
    if tagged, isTagged := named.(distreference.NamedTagged); isTagged {
        return WithTag(r, tagged.Tag())
    }
    return r, nil
    return named, nil
}

// WithName returns a named object representing the given string. If the input
// is invalid ErrReferenceInvalidFormat will be returned.
func WithName(name string) (Named, error) {
    name, err := normalize(name)
    if err != nil {
        return nil, err
    if len(name) > NameTotalLengthMax {
        return nil, ErrNameTooLong
    }
    if err := validateName(name); err != nil {
        return nil, err

    match := anchoredNameRegexp.FindStringSubmatch(name)
    if match == nil || len(match) != 3 {
        return nil, ErrReferenceInvalidFormat
    }
    r, err := distreference.WithName(name)
    if err != nil {
        return nil, err
    }
    return &namedRef{r}, nil
    return repository{
        domain: match[1],
        path:   match[2],
    }, nil
}

// WithTag combines the name from "name" and the tag from "tag" to form a
// reference incorporating both the name and the tag.
func WithTag(name Named, tag string) (NamedTagged, error) {
    r, err := distreference.WithTag(name, tag)
    if err != nil {
        return nil, err
    if !anchoredTagRegexp.MatchString(tag) {
        return nil, ErrTagInvalidFormat
    }
    return &taggedRef{namedRef{r}}, nil
    var repo repository
    if r, ok := name.(namedRepository); ok {
        repo.domain = r.Domain()
        repo.path = r.Path()
    } else {
        repo.path = name.Name()
    }
    if canonical, ok := name.(Canonical); ok {
        return reference{
            namedRepository: repo,
            tag:             tag,
            digest:          canonical.Digest(),
        }, nil
    }
    return taggedReference{
        namedRepository: repo,
        tag:             tag,
    }, nil
}

// WithDigest combines the name from "name" and the digest from "digest" to form
// a reference incorporating both the name and the digest.
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
    r, err := distreference.WithDigest(name, digest)
    if err != nil {
        return nil, err
    if !anchoredDigestRegexp.MatchString(digest.String()) {
        return nil, ErrDigestInvalidFormat
    }
    return &canonicalRef{namedRef{r}}, nil
}

type namedRef struct {
    distreference.Named
}
type taggedRef struct {
    namedRef
}
type canonicalRef struct {
    namedRef
}

func (r *namedRef) FullName() string {
    hostname, remoteName := splitHostname(r.Name())
    return hostname + "/" + remoteName
}
func (r *namedRef) Hostname() string {
    hostname, _ := splitHostname(r.Name())
    return hostname
}
func (r *namedRef) RemoteName() string {
    _, remoteName := splitHostname(r.Name())
    return remoteName
}
func (r *taggedRef) Tag() string {
    return r.namedRef.Named.(distreference.NamedTagged).Tag()
}
func (r *canonicalRef) Digest() digest.Digest {
    return r.namedRef.Named.(distreference.Canonical).Digest()
}

// WithDefaultTag adds a default tag to a reference if it only has a repo name.
func WithDefaultTag(ref Named) Named {
    if IsNameOnly(ref) {
        ref, _ = WithTag(ref, DefaultTag)
    var repo repository
    if r, ok := name.(namedRepository); ok {
        repo.domain = r.Domain()
        repo.path = r.Path()
    } else {
        repo.path = name.Name()
    }
    if tagged, ok := name.(Tagged); ok {
        return reference{
            namedRepository: repo,
            tag:             tagged.Tag(),
            digest:          digest,
        }, nil
    }
    return canonicalReference{
        namedRepository: repo,
        digest:          digest,
    }, nil
}

// TrimNamed removes any tag or digest from the named reference.
func TrimNamed(ref Named) Named {
    domain, path := SplitHostname(ref)
    return repository{
        domain: domain,
        path:   path,
    }
}

func getBestReferenceType(ref reference) Reference {
    if ref.Name() == "" {
        // Allow digest only references
        if ref.digest != "" {
            return digestReference(ref.digest)
        }
        return nil
    }
    if ref.tag == "" {
        if ref.digest != "" {
            return canonicalReference{
                namedRepository: ref.namedRepository,
                digest:          ref.digest,
            }
        }
        return ref.namedRepository
    }
    if ref.digest == "" {
        return taggedReference{
            namedRepository: ref.namedRepository,
            tag:             ref.tag,
        }
    }

    return ref
}

// IsNameOnly returns true if reference only contains a repo name.
func IsNameOnly(ref Named) bool {
    if _, ok := ref.(NamedTagged); ok {
        return false
    }
    if _, ok := ref.(Canonical); ok {
        return false
    }
    return true
type reference struct {
    namedRepository
    tag    string
    digest digest.Digest
}

// ParseIDOrReference parses string for an image ID or a reference. ID can be
// without a default prefix.
func ParseIDOrReference(idOrRef string) (digest.Digest, Named, error) {
    if err := validateID(idOrRef); err == nil {
        idOrRef = "sha256:" + idOrRef
    }
    if dgst, err := digest.ParseDigest(idOrRef); err == nil {
        return dgst, nil, nil
    }
    ref, err := ParseNamed(idOrRef)
    return "", ref, err
func (r reference) String() string {
    return r.Name() + ":" + r.tag + "@" + r.digest.String()
}

// splitHostname splits a repository name to hostname and remotename string.
// If no valid hostname is found, the default hostname is used. Repository name
// needs to be already validated before.
func splitHostname(name string) (hostname, remoteName string) {
    i := strings.IndexRune(name, '/')
    if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
        hostname, remoteName = DefaultHostname, name
    } else {
        hostname, remoteName = name[:i], name[i+1:]
    }
    if hostname == LegacyDefaultHostname {
        hostname = DefaultHostname
    }
    if hostname == DefaultHostname && !strings.ContainsRune(remoteName, '/') {
        remoteName = DefaultRepoPrefix + remoteName
    }
    return
func (r reference) Tag() string {
    return r.tag
}

// normalize returns a repository name in its normalized form, meaning it
// will not contain default hostname nor library/ prefix for official images.
func normalize(name string) (string, error) {
    host, remoteName := splitHostname(name)
    if strings.ToLower(remoteName) != remoteName {
        return "", errors.New("invalid reference format: repository name must be lowercase")
    }
    if host == DefaultHostname {
        if strings.HasPrefix(remoteName, DefaultRepoPrefix) {
            return strings.TrimPrefix(remoteName, DefaultRepoPrefix), nil
        }
        return remoteName, nil
    }
    return name, nil
func (r reference) Digest() digest.Digest {
    return r.digest
}

var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)

func validateID(id string) error {
    if ok := validHex.MatchString(id); !ok {
        return fmt.Errorf("image ID %q is invalid", id)
    }
    return nil
type repository struct {
    domain string
    path   string
}

func validateName(name string) error {
    if err := validateID(name); err == nil {
        return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name)
    }
    return nil
func (r repository) String() string {
    return r.Name()
}

func (r repository) Name() string {
    if r.domain == "" {
        return r.path
    }
    return r.domain + "/" + r.path
}

func (r repository) Domain() string {
    return r.domain
}

func (r repository) Path() string {
    return r.path
}

type digestReference digest.Digest

func (d digestReference) String() string {
    return digest.Digest(d).String()
}

func (d digestReference) Digest() digest.Digest {
    return digest.Digest(d)
}

type taggedReference struct {
    namedRepository
    tag string
}

func (t taggedReference) String() string {
    return t.Name() + ":" + t.tag
}

func (t taggedReference) Tag() string {
    return t.tag
}

type canonicalReference struct {
    namedRepository
    digest digest.Digest
}

func (c canonicalReference) String() string {
    return c.Name() + "@" + c.digest.String()
}

func (c canonicalReference) Digest() digest.Digest {
    return c.digest
}
143 vendor/github.com/containers/image/docker/reference/regexp.go (generated, vendored, new file)
@@ -0,0 +1,143 @@
package reference

import "regexp"

var (
    // alphaNumericRegexp defines the alpha numeric atom, typically a
    // component of names. This only allows lower case characters and digits.
    alphaNumericRegexp = match(`[a-z0-9]+`)

    // separatorRegexp defines the separators allowed to be embedded in name
    // components. This allow one period, one or two underscore and multiple
    // dashes.
    separatorRegexp = match(`(?:[._]|__|[-]*)`)

    // nameComponentRegexp restricts registry path component names to start
    // with at least one letter or number, with following parts able to be
    // separated by one period, one or two underscore and multiple dashes.
    nameComponentRegexp = expression(
        alphaNumericRegexp,
        optional(repeated(separatorRegexp, alphaNumericRegexp)))

    // domainComponentRegexp restricts the registry domain component of a
    // repository name to start with a component as defined by domainRegexp
    // and followed by an optional port.
    domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)

    // domainRegexp defines the structure of potential domain components
    // that may be part of image names. This is purposely a subset of what is
    // allowed by DNS to ensure backwards compatibility with Docker image
    // names.
    domainRegexp = expression(
        domainComponentRegexp,
        optional(repeated(literal(`.`), domainComponentRegexp)),
        optional(literal(`:`), match(`[0-9]+`)))

    // TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
    TagRegexp = match(`[\w][\w.-]{0,127}`)

    // anchoredTagRegexp matches valid tag names, anchored at the start and
    // end of the matched string.
    anchoredTagRegexp = anchored(TagRegexp)

    // DigestRegexp matches valid digests.
    DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)

    // anchoredDigestRegexp matches valid digests, anchored at the start and
    // end of the matched string.
    anchoredDigestRegexp = anchored(DigestRegexp)

    // NameRegexp is the format for the name component of references. The
    // regexp has capturing groups for the domain and name part omitting
    // the separating forward slash from either.
    NameRegexp = expression(
        optional(domainRegexp, literal(`/`)),
        nameComponentRegexp,
        optional(repeated(literal(`/`), nameComponentRegexp)))

    // anchoredNameRegexp is used to parse a name value, capturing the
    // domain and trailing components.
    anchoredNameRegexp = anchored(
        optional(capture(domainRegexp), literal(`/`)),
        capture(nameComponentRegexp,
            optional(repeated(literal(`/`), nameComponentRegexp))))

    // ReferenceRegexp is the full supported format of a reference. The regexp
    // is anchored and has capturing groups for name, tag, and digest
    // components.
    ReferenceRegexp = anchored(capture(NameRegexp),
        optional(literal(":"), capture(TagRegexp)),
        optional(literal("@"), capture(DigestRegexp)))

    // IdentifierRegexp is the format for string identifier used as a
    // content addressable identifier using sha256. These identifiers
    // are like digests without the algorithm, since sha256 is used.
    IdentifierRegexp = match(`([a-f0-9]{64})`)

    // ShortIdentifierRegexp is the format used to represent a prefix
    // of an identifier. A prefix may be used to match a sha256 identifier
    // within a list of trusted identifiers.
    ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)

    // anchoredIdentifierRegexp is used to check or match an
    // identifier value, anchored at start and end of string.
    anchoredIdentifierRegexp = anchored(IdentifierRegexp)

    // anchoredShortIdentifierRegexp is used to check if a value
    // is a possible identifier prefix, anchored at start and end
    // of string.
    anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
)

// match compiles the string to a regular expression.
var match = regexp.MustCompile

// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) *regexp.Regexp {
    re := match(regexp.QuoteMeta(s))

    if _, complete := re.LiteralPrefix(); !complete {
        panic("must be a literal")
    }

    return re
}

// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
    var s string
    for _, re := range res {
        s += re.String()
    }

    return match(s)
}

// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
    return match(group(expression(res...)).String() + `?`)
}

// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
    return match(group(expression(res...)).String() + `+`)
}

// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
    return match(`(?:` + expression(res...).String() + `)`)
}

// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
    return match(`(` + expression(res...).String() + `)`)
}

// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
    return match(`^` + expression(res...).String() + `$`)
}
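Since the building blocks above only ever wrap in non-capturing groups, ReferenceRegexp exposes exactly the three capture groups that Parse consumes; a quick sketch (the repository name is illustrative):

package main

import (
    "fmt"

    "github.com/containers/image/docker/reference"
)

func main() {
    m := reference.ReferenceRegexp.FindStringSubmatch("quay.io/skopeo/stable:latest")
    // m is nil if the string is not a valid reference.
    // m[0] is the whole match; m[1] is the name, m[2] the tag, m[3] the digest.
    fmt.Println(m[1]) // quay.io/skopeo/stable
    fmt.Println(m[2]) // latest
    fmt.Println(m[3]) // (empty: no digest component)
}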
250 vendor/github.com/containers/image/docker/tarfile/dest.go (generated, vendored, new file)
@@ -0,0 +1,250 @@
package tarfile

import (
    "archive/tar"
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "time"

    "github.com/Sirupsen/logrus"
    "github.com/containers/image/docker/reference"
    "github.com/containers/image/manifest"
    "github.com/containers/image/types"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
)

const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.

// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
type Destination struct {
    writer  io.Writer
    tar     *tar.Writer
    repoTag string
    // Other state.
    blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
}

// NewDestination returns a tarfile.Destination for the specified io.Writer.
func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination {
    // For github.com/docker/docker consumers, this works just as well as
    //   refString := ref.String()
    // because when reading the RepoTags strings, github.com/docker/docker/reference
    // normalizes both of them to the same value.
    //
    // Doing it this way to include the normalized-out `docker.io[/library]` does make
    // a difference for github.com/projectatomic/docker consumers, with the
    // “Add --add-registry and --block-registry options to docker daemon” patch.
    // These consumers treat reference strings which include a hostname and reference
    // strings without a hostname differently.
    //
    // Using the host name here is more explicit about the intent, and it has the same
    // effect as (docker pull) in projectatomic/docker, which tags the result using
    // a hostname-qualified reference.
    // See https://github.com/containers/image/issues/72 for a more detailed
    // analysis and explanation.
    refString := fmt.Sprintf("%s:%s", ref.Name(), ref.Tag())
    return &Destination{
        writer:  dest,
        tar:     tar.NewWriter(dest),
        repoTag: refString,
        blobs:   make(map[digest.Digest]types.BlobInfo),
    }
}

// SupportedManifestMIMETypes tells which manifest mime types the destination supports
// If an empty slice or nil it's returned, then any mime type can be tried to upload
func (d *Destination) SupportedManifestMIMETypes() []string {
    return []string{
        manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
    }
}

// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
func (d *Destination) SupportsSignatures() error {
    return errors.Errorf("Storing signatures for docker tar files is not supported")
}

// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
func (d *Destination) ShouldCompressLayers() bool {
    return false
}

// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (d *Destination) AcceptsForeignLayerURLs() bool {
    return false
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
    if inputInfo.Digest.String() == "" {
        return types.BlobInfo{}, errors.Errorf("Can not stream a blob with unknown digest to docker tarfile")
    }

    ok, size, err := d.HasBlob(inputInfo)
    if err != nil {
        return types.BlobInfo{}, err
    }
    if ok {
        return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
    }

    if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size.
        logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
        streamCopy, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-tarfile-blob")
        if err != nil {
            return types.BlobInfo{}, err
        }
        defer os.Remove(streamCopy.Name())
        defer streamCopy.Close()

        size, err := io.Copy(streamCopy, stream)
        if err != nil {
            return types.BlobInfo{}, err
        }
        _, err = streamCopy.Seek(0, os.SEEK_SET)
        if err != nil {
            return types.BlobInfo{}, err
        }
        inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
        stream = streamCopy
        logrus.Debugf("... streaming done")
    }

    digester := digest.Canonical.Digester()
    tee := io.TeeReader(stream, digester.Hash())
    if err := d.sendFile(inputInfo.Digest.String(), inputInfo.Size, tee); err != nil {
        return types.BlobInfo{}, err
    }
    d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}
    return types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}, nil
}

// HasBlob returns true iff the image destination already contains a blob with
// the matching digest which can be reapplied using ReapplyBlob. Unlike
// PutBlob, the digest can not be empty. If HasBlob returns true, the size of
// the blob must also be returned. If the destination does not contain the
// blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); it
// returns a non-nil error only on an unexpected failure.
func (d *Destination) HasBlob(info types.BlobInfo) (bool, int64, error) {
    if info.Digest == "" {
        return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
    }
    if blob, ok := d.blobs[info.Digest]; ok {
        return true, blob.Size, nil
    }
    return false, -1, nil
}

// ReapplyBlob informs the image destination that a blob for which HasBlob
// previously returned true would have been passed to PutBlob if it had
// returned false. Like HasBlob and unlike PutBlob, the digest can not be
// empty. If the blob is a filesystem layer, this signifies that the changes
// it describes need to be applied again when composing a filesystem tree.
func (d *Destination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
    return info, nil
}

// PutManifest sends the given manifest blob to the destination.
// FIXME? This should also receive a MIME type if known, to differentiate
// between schema versions.
func (d *Destination) PutManifest(m []byte) error {
    var man schema2Manifest
    if err := json.Unmarshal(m, &man); err != nil {
        return errors.Wrap(err, "Error parsing manifest")
    }
    if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
        return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
    }

    layerPaths := []string{}
    for _, l := range man.Layers {
        layerPaths = append(layerPaths, l.Digest.String())
    }

    items := []manifestItem{{
        Config:       man.Config.Digest.String(),
        RepoTags:     []string{d.repoTag},
        Layers:       layerPaths,
        Parent:       "",
        LayerSources: nil,
    }}
    itemsBytes, err := json.Marshal(&items)
    if err != nil {
        return err
    }

    // FIXME? Do we also need to support the legacy format?
    return d.sendFile(manifestFileName, int64(len(itemsBytes)), bytes.NewReader(itemsBytes))
}

type tarFI struct {
    path string
    size int64
}

func (t *tarFI) Name() string {
    return t.path
}
func (t *tarFI) Size() int64 {
    return t.size
}
func (t *tarFI) Mode() os.FileMode {
    return 0444
}
func (t *tarFI) ModTime() time.Time {
    return time.Unix(0, 0)
}
func (t *tarFI) IsDir() bool {
    return false
}
func (t *tarFI) Sys() interface{} {
    return nil
}

// sendFile sends a file into the tar stream.
func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader) error {
    hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
    if err != nil {
        return err
    }
    logrus.Debugf("Sending as tar file %s", path)
    if err := d.tar.WriteHeader(hdr); err != nil {
        return err
    }
    size, err := io.Copy(d.tar, stream)
    if err != nil {
        return err
    }
    if size != expectedSize {
        return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
    }
    return nil
}

// PutSignatures adds the given signatures to the docker tarfile (currently not
// supported). MUST be called after PutManifest (signatures reference manifest
// contents)
func (d *Destination) PutSignatures(signatures [][]byte) error {
    if len(signatures) != 0 {
        return errors.Errorf("Storing signatures for docker tar files is not supported")
    }
    return nil
}

// Commit finishes writing data to the underlying io.Writer.
// It is the caller's responsibility to close it, if necessary.
func (d *Destination) Commit() error {
    return d.tar.Close()
}
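A caller drives Destination in the order PutBlob (per blob) → PutManifest → Commit; a hedged sketch of the wiring (the file name and reference are illustrative, and the blob uploads are elided):

package main

import (
    "os"

    "github.com/containers/image/docker/reference"
    "github.com/containers/image/docker/tarfile"
)

func main() {
    named, err := reference.ParseNormalizedNamed("docker.io/library/busybox:latest")
    if err != nil {
        panic(err)
    }
    tagged, ok := named.(reference.NamedTagged) // a reference parsed with a tag carries one
    if !ok {
        panic("reference has no tag")
    }

    f, err := os.Create("busybox.tar")
    if err != nil {
        panic(err)
    }
    defer f.Close()

    dest := tarfile.NewDestination(f, tagged)
    // ... PutBlob for each layer and the config, then PutManifest ...
    if err := dest.Commit(); err != nil { // flushes the tar stream; the caller closes f
        panic(err)
    }
}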
3 vendor/github.com/containers/image/docker/tarfile/doc.go (generated, vendored, new file)
@@ -0,0 +1,3 @@
// Package tarfile is an internal implementation detail of some transports.
// Do not use outside of the github.com/containers/image repo!
package tarfile
352 vendor/github.com/containers/image/docker/tarfile/src.go (generated, vendored, new file)
@@ -0,0 +1,352 @@
package tarfile

import (
    "archive/tar"
    "bytes"
    "encoding/json"
    "io"
    "io/ioutil"
    "os"
    "path"

    "github.com/containers/image/manifest"
    "github.com/containers/image/pkg/compression"
    "github.com/containers/image/types"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
)

// Source is a partial implementation of types.ImageSource for reading from tarPath.
type Source struct {
    tarPath string
    // The following data is only available after ensureCachedDataIsPresent() succeeds
    tarManifest       *manifestItem // nil if not available yet.
    configBytes       []byte
    configDigest      digest.Digest
    orderedDiffIDList []diffID
    knownLayers       map[diffID]*layerInfo
    // Other state
    generatedManifest []byte // Private cache for GetManifest(), nil if not set yet.
}

type layerInfo struct {
    path string
    size int64
}

// NewSource returns a tarfile.Source for the specified path.
func NewSource(path string) *Source {
    // TODO: We could add support for multiple images in a single archive, so
    // that people could use docker-archive:opensuse.tar:opensuse:leap as
    // the source of an image.
    return &Source{
        tarPath: path,
    }
}

// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
type tarReadCloser struct {
    *tar.Reader
    backingFile *os.File
}

func (t *tarReadCloser) Close() error {
    return t.backingFile.Close()
}

// openTarComponent returns a ReadCloser for the specific file within the archive.
// This is linear scan; we assume that the tar file will have a fairly small amount of files (~layers),
// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
// The caller should call .Close() on the returned stream.
func (s *Source) openTarComponent(componentPath string) (io.ReadCloser, error) {
    f, err := os.Open(s.tarPath)
    if err != nil {
        return nil, err
    }
    succeeded := false
    defer func() {
        if !succeeded {
            f.Close()
        }
    }()

    tarReader, header, err := findTarComponent(f, componentPath)
    if err != nil {
        return nil, err
    }
    if header == nil {
        return nil, os.ErrNotExist
    }
    if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
        // We follow only one symlink; so no loops are possible.
        if _, err := f.Seek(0, os.SEEK_SET); err != nil {
            return nil, err
        }
        // The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
        // so we don't care.
        tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
        if err != nil {
            return nil, err
        }
        if header == nil {
            return nil, os.ErrNotExist
        }
    }

    if !header.FileInfo().Mode().IsRegular() {
        return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
    }
    succeeded = true
    return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
}

// findTarComponent returns a header and a reader matching path within inputFile,
// or (nil, nil, nil) if not found.
func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) {
    t := tar.NewReader(inputFile)
    for {
        h, err := t.Next()
        if err == io.EOF {
            break
        }
        if err != nil {
            return nil, nil, err
        }
        if h.Name == path {
            return t, h, nil
        }
    }
    return nil, nil, nil
}

// readTarComponent returns full contents of componentPath.
func (s *Source) readTarComponent(path string) ([]byte, error) {
    file, err := s.openTarComponent(path)
    if err != nil {
        return nil, errors.Wrapf(err, "Error loading tar component %s", path)
    }
    defer file.Close()
    bytes, err := ioutil.ReadAll(file)
    if err != nil {
        return nil, err
    }
    return bytes, nil
}

// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
func (s *Source) ensureCachedDataIsPresent() error {
    if s.tarManifest != nil {
        return nil
    }

    // Read and parse manifest.json
    tarManifest, err := s.loadTarManifest()
    if err != nil {
        return err
    }

    // Read and parse config.
    configBytes, err := s.readTarComponent(tarManifest.Config)
    if err != nil {
        return err
    }
    var parsedConfig image // Most fields omitted, we only care about layer DiffIDs.
    if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
        return errors.Wrapf(err, "Error decoding tar config %s", tarManifest.Config)
    }

    knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
    if err != nil {
        return err
    }

    // Success; commit.
    s.tarManifest = tarManifest
    s.configBytes = configBytes
    s.configDigest = digest.FromBytes(configBytes)
    s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
    s.knownLayers = knownLayers
    return nil
}

// loadTarManifest loads and decodes the manifest.json.
func (s *Source) loadTarManifest() (*manifestItem, error) {
    // FIXME? Do we need to deal with the legacy format?
    bytes, err := s.readTarComponent(manifestFileName)
    if err != nil {
        return nil, err
    }
    var items []manifestItem
    if err := json.Unmarshal(bytes, &items); err != nil {
        return nil, errors.Wrap(err, "Error decoding tar manifest.json")
    }
    if len(items) != 1 {
        return nil, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(items))
    }
    return &items[0], nil
}

func (s *Source) prepareLayerData(tarManifest *manifestItem, parsedConfig *image) (map[diffID]*layerInfo, error) {
    // Collect layer data available in manifest and config.
    if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
        return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
    }
    knownLayers := map[diffID]*layerInfo{}
    unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
    for i, diffID := range parsedConfig.RootFS.DiffIDs {
        if _, ok := knownLayers[diffID]; ok {
            // Apparently it really can happen that a single image contains the same layer diff more than once.
            // In that case, the diffID validation ensures that both layers truly are the same, and it should not matter
            // which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original.
            continue
        }
        layerPath := tarManifest.Layers[i]
        if _, ok := unknownLayerSizes[layerPath]; ok {
            return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
        }
        li := &layerInfo{ // A new element in each iteration
            path: layerPath,
            size: -1,
        }
        knownLayers[diffID] = li
        unknownLayerSizes[layerPath] = li
    }

    // Scan the tar file to collect layer sizes.
    file, err := os.Open(s.tarPath)
    if err != nil {
        return nil, err
    }
    defer file.Close()
    t := tar.NewReader(file)
    for {
        h, err := t.Next()
        if err == io.EOF {
            break
        }
        if err != nil {
            return nil, err
        }
        if li, ok := unknownLayerSizes[h.Name]; ok {
            li.size = h.Size
            delete(unknownLayerSizes, h.Name)
        }
    }
    if len(unknownLayerSizes) != 0 {
        return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice.
    }

    return knownLayers, nil
}

// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
func (s *Source) GetManifest() ([]byte, string, error) {
    if s.generatedManifest == nil {
        if err := s.ensureCachedDataIsPresent(); err != nil {
            return nil, "", err
        }
        m := schema2Manifest{
            SchemaVersion: 2,
            MediaType:     manifest.DockerV2Schema2MediaType,
            Config: distributionDescriptor{
                MediaType: manifest.DockerV2Schema2ConfigMediaType,
                Size:      int64(len(s.configBytes)),
                Digest:    s.configDigest,
            },
            Layers: []distributionDescriptor{},
        }
        for _, diffID := range s.orderedDiffIDList {
            li, ok := s.knownLayers[diffID]
            if !ok {
                return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
            }
            m.Layers = append(m.Layers, distributionDescriptor{
                Digest:    digest.Digest(diffID), // diffID is a digest of the uncompressed tarball
                MediaType: manifest.DockerV2Schema2LayerMediaType,
                Size:      li.size,
            })
        }
        manifestBytes, err := json.Marshal(&m)
        if err != nil {
            return nil, "", err
        }
        s.generatedManifest = manifestBytes
    }
    return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
}

// GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest
// out of a manifest list.
func (s *Source) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
    // How did we even get here? GetManifest() above has returned a manifest.DockerV2Schema2MediaType.
    return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
}

type readCloseWrapper struct {
    io.Reader
    closeFunc func() error
}

func (r readCloseWrapper) Close() error {
    if r.closeFunc != nil {
        return r.closeFunc()
    }
    return nil
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
func (s *Source) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
    if err := s.ensureCachedDataIsPresent(); err != nil {
        return nil, 0, err
    }

    if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
        return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
    }

    if li, ok := s.knownLayers[diffID(info.Digest)]; ok { // diffID is a digest of the uncompressed tarball,
        stream, err := s.openTarComponent(li.path)
        if err != nil {
            return nil, 0, err
        }

        // In order to handle the fact that digests != diffIDs (and thus that a
        // caller which is trying to verify the blob will run into problems),
        // we need to decompress blobs. This is a bit ugly, but it's a
        // consequence of making everything addressable by their DiffID rather
        // than by their digest...
        //
        // In particular, because the v2s2 manifest being generated uses
        // DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of
        // layers not their _actual_ digest. The result is that copy/... will
        // be verifying a "digest" which is not the actual layer's digest (but
        // is instead the DiffID).

        decompressFunc, reader, err := compression.DetectCompression(stream)
        if err != nil {
            return nil, 0, errors.Wrapf(err, "Detecting compression in blob %s", info.Digest)
        }

        if decompressFunc != nil {
            reader, err = decompressFunc(reader)
            if err != nil {
                return nil, 0, errors.Wrapf(err, "Decompressing blob %s stream", info.Digest)
            }
        }

        newStream := readCloseWrapper{
            Reader:    reader,
            closeFunc: stream.Close,
        }

        return newStream, li.size, nil
    }

    return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
}

// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
func (s *Source) GetSignatures() ([][]byte, error) {
    return [][]byte{}, nil
}
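Reading back is the mirror image; a minimal sketch of consuming a docker-save style archive through Source (the archive path is illustrative):

package main

import (
    "fmt"

    "github.com/containers/image/docker/tarfile"
)

func main() {
    src := tarfile.NewSource("/tmp/busybox.tar") // e.g. an archive produced by docker save

    // GetManifest synthesizes a Docker schema 2 manifest from manifest.json and the config.
    manifestBytes, mimeType, err := src.GetManifest()
    if err != nil {
        panic(err)
    }
    fmt.Println(mimeType)           // the schema 2 manifest MIME type
    fmt.Println(len(manifestBytes)) // size of the synthesized manifest
}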
@@ -1,6 +1,6 @@
package daemon
package tarfile

import "github.com/docker/distribution/digest"
import "github.com/opencontainers/go-digest"

// Various data structures.

@@ -43,7 +43,7 @@ type schema2Manifest struct {

// Based on github.com/docker/docker/image/image.go
// MOST CONTENT OMITTED AS UNNECESSARY
type dockerImage struct {
type image struct {
    RootFS *rootFS `json:"rootfs,omitempty"`
}
9 vendor/github.com/containers/image/image/docker_list.go (generated, vendored)
@@ -2,13 +2,12 @@ package image

import (
    "encoding/json"
    "errors"
    "fmt"
    "runtime"

    "github.com/containers/image/manifest"
    "github.com/containers/image/types"
    "github.com/docker/distribution/digest"
    "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
)

type platformSpec struct {
@@ -54,10 +53,10 @@ func manifestSchema2FromManifestList(src types.ImageSource, manblob []byte) (gen

    matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
    if err != nil {
        return nil, fmt.Errorf("Error computing manifest digest: %v", err)
        return nil, errors.Wrap(err, "Error computing manifest digest")
    }
    if !matches {
        return nil, fmt.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest)
        return nil, errors.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest)
    }

    return manifestInstanceFromBlob(src, manblob, mt)
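These hunks are a mechanical migration from fmt.Errorf to github.com/pkg/errors; the practical difference is that errors.Wrap keeps the original error retrievable as a cause, as this small sketch shows:

package main

import (
    "fmt"

    "github.com/pkg/errors"
)

func main() {
    base := errors.New("connection refused")

    e1 := fmt.Errorf("Error computing manifest digest: %v", base) // flattens the cause into a plain string
    e2 := errors.Wrap(base, "Error computing manifest digest")    // keeps base as the cause

    fmt.Println(e1)                       // Error computing manifest digest: connection refused
    fmt.Println(e2)                       // Error computing manifest digest: connection refused
    fmt.Println(errors.Cause(e2) == base) // true: the wrapped error is still reachable
}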
57 vendor/github.com/containers/image/image/docker_schema1.go (generated, vendored)
@@ -2,8 +2,6 @@ package image

import (
    "encoding/json"
    "errors"
    "fmt"
    "regexp"
    "strings"
    "time"
@@ -11,7 +9,9 @@ import (
    "github.com/containers/image/docker/reference"
    "github.com/containers/image/manifest"
    "github.com/containers/image/types"
    "github.com/docker/distribution/digest"
    "github.com/opencontainers/go-digest"
    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/pkg/errors"
)

var (
@@ -54,7 +54,7 @@ func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) {
        return nil, err
    }
    if mschema1.SchemaVersion != 1 {
        return nil, fmt.Errorf("unsupported schema version %d", mschema1.SchemaVersion)
        return nil, errors.Errorf("unsupported schema version %d", mschema1.SchemaVersion)
    }
    if len(mschema1.FSLayers) != len(mschema1.History) {
        return nil, errors.New("length of history not equal to number of layers")
@@ -73,7 +73,7 @@ func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) {
func manifestSchema1FromComponents(ref reference.Named, fsLayers []fsLayersSchema1, history []historySchema1, architecture string) genericManifest {
    var name, tag string
    if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
        name = ref.RemoteName()
        name = reference.Path(ref)
        if tagged, ok := ref.(reference.NamedTagged); ok {
            tag = tagged.Tag()
        }
@@ -113,6 +113,17 @@ func (m *manifestSchema1) ConfigBlob() ([]byte, error) {
    return nil, nil
}

// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
// layers in the resulting configuration isn't guaranteed to be returned due to how
// old image manifests work (docker v2s1 especially).
func (m *manifestSchema1) OCIConfig() (*imgspecv1.Image, error) {
    v2s2, err := m.convertToManifestSchema2(nil, nil)
    if err != nil {
        return nil, err
    }
    return v2s2.OCIConfig()
}

// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
// The Digest field is guaranteed to be provided; Size may be -1.
// WARNING: The list may contain duplicates, and they are semantically relevant.
@@ -153,7 +164,7 @@ func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (typ
    if options.LayerInfos != nil {
        // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
        if len(copy.FSLayers) != len(options.LayerInfos) {
            return nil, fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos))
            return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos))
        }
        for i, info := range options.LayerInfos {
            // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
@@ -171,7 +182,7 @@ func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (typ
    case manifest.DockerV2Schema2MediaType:
        return copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs)
    default:
        return nil, fmt.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType)
        return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType)
    }

    return memoryImageFromManifest(&copy), nil
@@ -211,7 +222,7 @@ func fixManifestLayers(manifest *manifestSchema1) error {
    for _, img := range imgs {
        // skip IDs that appear after each other, we handle those later
        if _, exists := idmap[img.ID]; img.ID != lastID && exists {
            return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
            return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
        }
        lastID = img.ID
        idmap[lastID] = struct{}{}
@@ -222,7 +233,7 @@ func fixManifestLayers(manifest *manifestSchema1) error {
            manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...)
            manifest.History = append(manifest.History[:i], manifest.History[i+1:]...)
        } else if imgs[i].Parent != imgs[i+1].ID {
            return fmt.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
            return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
        }
    }
    return nil
@@ -230,7 +241,7 @@ func fixManifestLayers(manifest *manifestSchema1) error {

func validateV1ID(id string) error {
    if ok := validHex.MatchString(id); !ok {
        return fmt.Errorf("image ID %q is invalid", id)
        return errors.Errorf("image ID %q is invalid", id)
    }
    return nil
}
@@ -239,16 +250,16 @@ func validateV1ID(id string) error {
func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (types.Image, error) {
    if len(m.History) == 0 {
        // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
        return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
        return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
    }
    if len(m.History) != len(m.FSLayers) {
        return nil, fmt.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers))
        return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers))
    }
    if len(uploadedLayerInfos) != len(m.FSLayers) {
        return nil, fmt.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers))
    if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.FSLayers) {
        return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers))
    }
    if len(layerDiffIDs) != len(m.FSLayers) {
        return nil, fmt.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers))
    if layerDiffIDs != nil && len(layerDiffIDs) != len(m.FSLayers) {
        return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers))
    }

    rootFS := rootFS{
@@ -263,7 +274,7 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl

        var v1compat v1Compatibility
        if err := json.Unmarshal([]byte(m.History[v1Index].V1Compatibility), &v1compat); err != nil {
            return nil, fmt.Errorf("Error decoding history entry %d: %v", v1Index, err)
            return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index)
        }
        history[v2Index] = imageHistory{
            Created: v1compat.Created,
@@ -274,12 +285,20 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
        }

        if !v1compat.ThrowAway {
            var size int64
            if uploadedLayerInfos != nil {
                size = uploadedLayerInfos[v2Index].Size
            }
            var d digest.Digest
            if layerDiffIDs != nil {
                d = layerDiffIDs[v2Index]
            }
            layers = append(layers, descriptor{
                MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
                Size:      uploadedLayerInfos[v2Index].Size,
                Size:      size,
                Digest:    m.FSLayers[v1Index].BlobSum,
            })
            rootFS.DiffIDs = append(rootFS.DiffIDs, layerDiffIDs[v2Index])
            rootFS.DiffIDs = append(rootFS.DiffIDs, d)
        }
    }
    configJSON, err := configJSONFromV1Config([]byte(m.History[0].V1Compatibility), rootFS, history)
|
||||
|
||||
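Note: the nil-guards above are what let the new OCIConfig() call convertToManifestSchema2(nil, nil); when no upload sizes or diffIDs are available, the descriptors simply fall back to zero values. A minimal, self-contained sketch of the same pattern (the type and function names here are ours, not from containers/image):

package main

import "fmt"

// layerDescriptor stands in for the manifest descriptor type.
type layerDescriptor struct {
	Size   int64
	DiffID string
}

// buildLayers tolerates nil metadata slices: a nil slice means "unknown",
// and the corresponding fields stay at their zero values.
func buildLayers(blobs []string, sizes []int64, diffIDs []string) []layerDescriptor {
	out := make([]layerDescriptor, 0, len(blobs))
	for i := range blobs {
		var d layerDescriptor
		if sizes != nil { // only index into sizes when the caller provided it
			d.Size = sizes[i]
		}
		if diffIDs != nil {
			d.DiffID = diffIDs[i]
		}
		out = append(out, d)
	}
	return out
}

func main() {
	// Mirrors the OCIConfig() path, where both metadata slices are nil.
	fmt.Println(buildLayers([]string{"a", "b"}, nil, nil))
}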
vendor/github.com/containers/image/image/docker_schema2.go (generated, vendored; 75 changes)
@@ -5,14 +5,15 @@ import (
 	"crypto/sha256"
 	"encoding/hex"
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"strings"

 	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
-	"github.com/docker/distribution/digest"
+	"github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
 )

 // gzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
@@ -77,12 +78,30 @@ func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
 	return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size}
 }

+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestSchema2) OCIConfig() (*imgspecv1.Image, error) {
+	configBlob, err := m.ConfigBlob()
+	if err != nil {
+		return nil, err
+	}
+	// docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
+	// than OCI v1. This unmarshal makes sure we drop docker v2s2
+	// fields that aren't needed in OCI v1.
+	configOCI := &imgspecv1.Image{}
+	if err := json.Unmarshal(configBlob, configOCI); err != nil {
+		return nil, err
+	}
+	return configOCI, nil
+}
+
 // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
 // The result is cached; it is OK to call this however often you need.
 func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
 	if m.configBlob == nil {
 		if m.src == nil {
-			return nil, fmt.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
+			return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
 		}
 		stream, _, err := m.src.GetBlob(types.BlobInfo{
 			Digest: m.ConfigDescriptor.Digest,
@@ -99,7 +118,7 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
 		}
 		computedDigest := digest.FromBytes(blob)
 		if computedDigest != m.ConfigDescriptor.Digest {
-			return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
+			return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
 		}
 		m.configBlob = blob
 	}
@@ -152,7 +171,7 @@ func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (typ
 	copy := *m // NOTE: This is not a deep copy, it still shares slices etc.
 	if options.LayerInfos != nil {
 		if len(copy.LayersDescriptors) != len(options.LayerInfos) {
-			return nil, fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
+			return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
 		}
 		copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos))
 		for i, info := range options.LayerInfos {
@@ -166,13 +185,47 @@ func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (typ
 	case "": // No conversion, OK
 	case manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType:
 		return copy.convertToManifestSchema1(options.InformationOnly.Destination)
+	case imgspecv1.MediaTypeImageManifest:
+		return copy.convertToManifestOCI1()
 	default:
-		return nil, fmt.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType)
+		return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType)
 	}

 	return memoryImageFromManifest(&copy), nil
 }

+func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) {
+	configOCI, err := m.OCIConfig()
+	if err != nil {
+		return nil, err
+	}
+	configOCIBytes, err := json.Marshal(configOCI)
+	if err != nil {
+		return nil, err
+	}
+
+	config := descriptor{
+		MediaType: imgspecv1.MediaTypeImageConfig,
+		Size:      int64(len(configOCIBytes)),
+		Digest:    digest.FromBytes(configOCIBytes),
+	}
+
+	layers := make([]descriptor, len(m.LayersDescriptors))
+	for idx := range layers {
+		layers[idx] = m.LayersDescriptors[idx]
+		if m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType {
+			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
+		} else {
+			// we assume layers are gzip'ed because docker v2s2 only deals with
+			// gzip'ed layers. However, OCI has non-gzip'ed layers as well.
+			layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip
+		}
+	}
+
+	m1 := manifestOCI1FromComponents(config, m.src, configOCIBytes, layers)
+	return memoryImageFromManifest(m1), nil
+}
+
 // Based on docker/distribution/manifest/schema1/config_builder.go
 func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) (types.Image, error) {
 	configBytes, err := m.ConfigBlob()
@@ -193,7 +246,7 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
 	haveGzippedEmptyLayer := false
 	if len(imageConfig.History) == 0 {
 		// What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
-		return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
+		return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
 	}
 	for v2Index, historyEntry := range imageConfig.History {
 		parentV1ID = v1ID
@@ -205,17 +258,17 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
 				logrus.Debugf("Uploading empty layer during conversion to schema 1")
 				info, err := dest.PutBlob(bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Digest: gzippedEmptyLayerDigest, Size: int64(len(gzippedEmptyLayer))})
 				if err != nil {
-					return nil, fmt.Errorf("Error uploading empty layer: %v", err)
+					return nil, errors.Wrap(err, "Error uploading empty layer")
 				}
 				if info.Digest != gzippedEmptyLayerDigest {
-					return nil, fmt.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, gzippedEmptyLayerDigest)
+					return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, gzippedEmptyLayerDigest)
 				}
 				haveGzippedEmptyLayer = true
 			}
 			blobDigest = gzippedEmptyLayerDigest
 		} else {
 			if nonemptyLayerIndex >= len(m.LayersDescriptors) {
-				return nil, fmt.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors))
+				return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors))
 			}
 			blobDigest = m.LayersDescriptors[nonemptyLayerIndex].Digest
 			nonemptyLayerIndex++
@@ -239,7 +292,7 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
 		fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
 		v1CompatibilityBytes, err := json.Marshal(&fakeImage)
 		if err != nil {
-			return nil, fmt.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
+			return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
 		}

 		fsLayers[v1Index] = fsLayersSchema1{BlobSum: blobDigest}
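Note: the layer loop in convertToManifestOCI1 is essentially a media-type translation table. The same mapping in isolation (the string constants below are copied from the Docker v2s2 and OCI v1 specs; the function name is ours):

package main

import "fmt"

// Media type constants as published in the docker v2s2 and OCI v1 specs.
const (
	dockerForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
	ociLayerGzip       = "application/vnd.oci.image.layer.v1.tar+gzip"
	ociLayerNonDist    = "application/vnd.oci.image.layer.nondistributable.v1.tar"
)

// toOCILayerMediaType mirrors the conversion loop: foreign (non-distributable)
// layers keep that property, everything else is assumed to be gzipped because
// docker v2s2 only carries gzipped layers.
func toOCILayerMediaType(dockerMediaType string) string {
	if dockerMediaType == dockerForeignLayer {
		return ociLayerNonDist
	}
	return ociLayerGzip
}

func main() {
	fmt.Println(toOCILayerMediaType("application/vnd.docker.image.rootfs.diff.tar.gzip"))
}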
vendor/github.com/containers/image/image/manifest.go (generated, vendored; 9 changes)
@@ -3,11 +3,10 @@ package image
 import (
 	"time"

-	"github.com/docker/distribution/digest"
-	"github.com/docker/engine-api/types/strslice"
-
 	"github.com/containers/image/manifest"
+	"github.com/containers/image/pkg/strslice"
 	"github.com/containers/image/types"
+	"github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 )

@@ -65,6 +64,10 @@ type genericManifest interface {
 	// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
 	// The result is cached; it is OK to call this however often you need.
 	ConfigBlob() ([]byte, error)
+	// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+	// layers in the resulting configuration isn't guaranteed to be returned due to how
+	// old image manifests work (docker v2s1 especially).
+	OCIConfig() (*imgspecv1.Image, error)
 	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
 	// The Digest field is guaranteed to be provided; Size may be -1.
 	// WARNING: The list may contain duplicates, and they are semantically relevant.
vendor/github.com/containers/image/image/memory.go (generated, vendored; 10 changes)
@@ -1,7 +1,7 @@
 package image

 import (
-	"errors"
+	"github.com/pkg/errors"

 	"github.com/containers/image/types"
 )
@@ -32,7 +32,13 @@ func (i *memoryImage) Reference() types.ImageReference {
 }

 // Close removes resources associated with an initialized UnparsedImage, if any.
-func (i *memoryImage) Close() {
+func (i *memoryImage) Close() error {
+	return nil
 }

+// Size returns the size of the image as stored, if known, or -1 if not.
+func (i *memoryImage) Size() (int64, error) {
+	return -1, nil
+}
+
 // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
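Note: changing Close() to return an error ripples through every implementation in this update, because callers typically defer the call. A sketch of the consuming side (the interface below is a stand-in for the updated types interfaces, not the real one):

package main

import "log"

// closer is a stand-in for the updated interface: Close now reports
// failures instead of swallowing them.
type closer interface {
	Close() error
}

type noopImage struct{}

func (noopImage) Close() error { return nil }

func use(c closer) {
	// A plain `defer c.Close()` would discard the error; log it instead.
	defer func() {
		if err := c.Close(); err != nil {
			log.Printf("closing image: %v", err)
		}
	}()
	// ... work with the image ...
}

func main() { use(noopImage{}) }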
vendor/github.com/containers/image/image/oci.go (generated, vendored; 35 changes)
@@ -2,20 +2,19 @@ package image

 import (
 	"encoding/json"
-	"fmt"
 	"io/ioutil"

 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
-	"github.com/docker/distribution/digest"
+	"github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
 )

 type manifestOCI1 struct {
 	src               types.ImageSource // May be nil if configBlob is not nil
 	configBlob        []byte            // If set, corresponds to contents of ConfigDescriptor.
 	SchemaVersion     int               `json:"schemaVersion"`
 	MediaType         string            `json:"mediaType"`
 	ConfigDescriptor  descriptor        `json:"config"`
 	LayersDescriptors []descriptor      `json:"layers"`
 }
@@ -29,12 +28,11 @@ func manifestOCI1FromManifest(src types.ImageSource, manifest []byte) (genericMa
 }

 // manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data:
-func manifestOCI1FromComponents(config descriptor, configBlob []byte, layers []descriptor) genericManifest {
+func manifestOCI1FromComponents(config descriptor, src types.ImageSource, configBlob []byte, layers []descriptor) genericManifest {
 	return &manifestOCI1{
-		src:               nil,
+		src:               src,
 		configBlob:        configBlob,
 		SchemaVersion:     2,
 		MediaType:         imgspecv1.MediaTypeImageManifest,
 		ConfigDescriptor:  config,
 		LayersDescriptors: layers,
 	}
@@ -45,7 +43,7 @@ func (m *manifestOCI1) serialize() ([]byte, error) {
 }

 func (m *manifestOCI1) manifestMIMEType() string {
-	return m.MediaType
+	return imgspecv1.MediaTypeImageManifest
 }

 // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
@@ -59,7 +57,7 @@ func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
 func (m *manifestOCI1) ConfigBlob() ([]byte, error) {
 	if m.configBlob == nil {
 		if m.src == nil {
-			return nil, fmt.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
+			return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
 		}
 		stream, _, err := m.src.GetBlob(types.BlobInfo{
 			Digest: m.ConfigDescriptor.Digest,
@@ -76,13 +74,28 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) {
 		}
 		computedDigest := digest.FromBytes(blob)
 		if computedDigest != m.ConfigDescriptor.Digest {
-			return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
+			return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
 		}
 		m.configBlob = blob
 	}
 	return m.configBlob, nil
 }

+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestOCI1) OCIConfig() (*imgspecv1.Image, error) {
+	cb, err := m.ConfigBlob()
+	if err != nil {
+		return nil, err
+	}
+	configOCI := &imgspecv1.Image{}
+	if err := json.Unmarshal(cb, configOCI); err != nil {
+		return nil, err
+	}
+	return configOCI, nil
+}
+
 // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
 // The Digest field is guaranteed to be provided; Size may be -1.
 // WARNING: The list may contain duplicates, and they are semantically relevant.
@@ -125,7 +138,7 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.
 	copy := *m // NOTE: This is not a deep copy, it still shares slices etc.
 	if options.LayerInfos != nil {
 		if len(copy.LayersDescriptors) != len(options.LayerInfos) {
-			return nil, fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
+			return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
 		}
 		copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos))
 		for i, info := range options.LayerInfos {
@@ -139,7 +152,7 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.
 	case manifest.DockerV2Schema2MediaType:
 		return copy.convertToManifestSchema2()
 	default:
-		return nil, fmt.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType)
+		return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType)
 	}

 	return memoryImageFromManifest(&copy), nil
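Note: passing src into manifestOCI1FromComponents lets a converted manifest fetch its config lazily when only a descriptor, not the blob, is at hand. The lazy-fetch-then-cache shape, reduced to its core (all names below are ours, for illustration only):

package main

import "fmt"

// blobSource is a stand-in for types.ImageSource.
type blobSource interface {
	GetBlob(digest string) ([]byte, error)
}

type ociManifest struct {
	src        blobSource // may be nil if configBlob is already set
	configBlob []byte
	configDig  string
}

// ConfigBlob returns the cached blob, fetching it from src on first use.
func (m *ociManifest) ConfigBlob() ([]byte, error) {
	if m.configBlob == nil {
		if m.src == nil {
			return nil, fmt.Errorf("neither src nor configBlob set")
		}
		blob, err := m.src.GetBlob(m.configDig)
		if err != nil {
			return nil, err
		}
		m.configBlob = blob // cache for subsequent calls
	}
	return m.configBlob, nil
}

func main() {
	m := &ociManifest{configBlob: []byte(`{}`)} // src may stay nil here
	b, _ := m.ConfigBlob()
	fmt.Println(string(b))
}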
vendor/github.com/containers/image/image/sourced.go (generated, vendored; 5 changes)
@@ -71,6 +71,11 @@ func FromUnparsedImage(unparsed *UnparsedImage) (types.Image, error) {
 	}, nil
 }

+// Size returns the size of the image as stored, if it's known, or -1 if it isn't.
+func (i *sourcedImage) Size() (int64, error) {
+	return -1, nil
+}
+
 // Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
 func (i *sourcedImage) Manifest() ([]byte, string, error) {
 	return i.manifestBlob, i.manifestMIMEType, nil
vendor/github.com/containers/image/image/unparsed.go (generated, vendored; 14 changes)
@@ -1,11 +1,11 @@
 package image

 import (
-	"fmt"
-
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
 )

 // UnparsedImage implements types.UnparsedImage .
@@ -36,8 +36,8 @@ func (i *UnparsedImage) Reference() types.ImageReference {
 }

 // Close removes resources associated with an initialized UnparsedImage, if any.
-func (i *UnparsedImage) Close() {
-	i.src.Close()
+func (i *UnparsedImage) Close() error {
+	return i.src.Close()
 }

 // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
@@ -53,13 +53,13 @@ func (i *UnparsedImage) Manifest() ([]byte, string, error) {
 		ref := i.Reference().DockerReference()
 		if ref != nil {
 			if canonical, ok := ref.(reference.Canonical); ok {
-				digest := canonical.Digest()
+				digest := digest.Digest(canonical.Digest())
 				matches, err := manifest.MatchesDigest(m, digest)
 				if err != nil {
-					return nil, "", fmt.Errorf("Error computing manifest digest: %v", err)
+					return nil, "", errors.Wrap(err, "Error computing manifest digest")
 				}
 				if !matches {
-					return nil, "", fmt.Errorf("Manifest does not match provided manifest digest %s", digest)
+					return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
 				}
 			}
 		}
 	}
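Note: the verification in Manifest() pins a fetched manifest to the digest embedded in a canonical reference; the check is a compare of content digests. A hedged sketch of the common sha256 case (the real manifest.MatchesDigest also understands signed schema 1 payloads, which this ignores):

package main

import (
	"crypto/sha256"
	"fmt"
)

// matchesDigest hashes the raw manifest bytes and compares the result
// against the expected digest string.
func matchesDigest(manifestBytes []byte, expected string) bool {
	actual := fmt.Sprintf("sha256:%x", sha256.Sum256(manifestBytes))
	return actual == expected
}

func main() {
	m := []byte(`{"schemaVersion": 2}`)
	want := fmt.Sprintf("sha256:%x", sha256.Sum256(m))
	fmt.Println(matchesDigest(m, want)) // true: content matches the pinned digest
}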
vendor/github.com/containers/image/manifest/manifest.go (generated, vendored; 2 changes)
@@ -3,8 +3,8 @@ package manifest
 import (
 	"encoding/json"

-	"github.com/docker/distribution/digest"
 	"github.com/docker/libtrust"
+	"github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 )
vendor/github.com/containers/image/oci/layout/oci_dest.go (generated, vendored; 95 changes)
@@ -2,16 +2,16 @@ package layout

 import (
 	"encoding/json"
-	"errors"
-	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
 	"path/filepath"

+	"github.com/pkg/errors"
+
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
-	"github.com/docker/distribution/digest"
+	"github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 )

@@ -31,20 +31,20 @@ func (d *ociImageDestination) Reference() types.ImageReference {
 }

 // Close removes resources associated with an initialized ImageDestination, if any.
-func (d *ociImageDestination) Close() {
+func (d *ociImageDestination) Close() error {
+	return nil
 }

 func (d *ociImageDestination) SupportedManifestMIMETypes() []string {
 	return []string{
 		imgspecv1.MediaTypeImageManifest,
 		manifest.DockerV2Schema2MediaType,
 	}
 }

 // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
 // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
 func (d *ociImageDestination) SupportsSignatures() error {
-	return fmt.Errorf("Pushing signatures for OCI images is not supported")
+	return errors.Errorf("Pushing signatures for OCI images is not supported")
 }

 // ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
@@ -80,7 +80,7 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
 	}()

-	digester := digest.Canonical.New()
+	digester := digest.Canonical.Digester()
 	tee := io.TeeReader(stream, digester.Hash())

 	size, err := io.Copy(blobFile, tee)
@@ -89,7 +89,7 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
 	}
 	computedDigest := digester.Digest()
 	if inputInfo.Size != -1 && size != inputInfo.Size {
-		return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
+		return types.BlobInfo{}, errors.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
 	}
 	if err := blobFile.Sync(); err != nil {
 		return types.BlobInfo{}, err
@@ -112,60 +112,42 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
 	return types.BlobInfo{Digest: computedDigest, Size: size}, nil
 }

-func createManifest(m []byte) ([]byte, string, error) {
-	om := imgspecv1.Manifest{}
-	mt := manifest.GuessMIMEType(m)
-	switch mt {
-	case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
-		// There a simple reason about not yet implementing this.
-		// OCI image-spec assure about backward compatibility with docker v2s2 but not v2s1
-		// generating a v2s2 is a migration docker does when upgrading to 1.10.3
-		// and I don't think we should bother about this now (I don't want to have migration code here in skopeo)
-		return nil, "", errors.New("can't create an OCI manifest from Docker V2 schema 1 manifest")
-	case manifest.DockerV2Schema2MediaType:
-		if err := json.Unmarshal(m, &om); err != nil {
-			return nil, "", err
-		}
-		om.MediaType = imgspecv1.MediaTypeImageManifest
-		for i, l := range om.Layers {
-			if l.MediaType == manifest.DockerV2Schema2ForeignLayerMediaType {
-				om.Layers[i].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
-			} else {
-				om.Layers[i].MediaType = imgspecv1.MediaTypeImageLayer
-			}
-		}
-		om.Config.MediaType = imgspecv1.MediaTypeImageConfig
-		b, err := json.Marshal(om)
-		if err != nil {
-			return nil, "", err
-		}
-		return b, om.MediaType, nil
-	case manifest.DockerV2ListMediaType:
-		return nil, "", errors.New("can't create an OCI manifest from Docker V2 schema 2 manifest list")
-	case imgspecv1.MediaTypeImageManifestList:
-		return nil, "", errors.New("can't create an OCI manifest from OCI manifest list")
-	case imgspecv1.MediaTypeImageManifest:
-		return m, mt, nil
-	}
-	return nil, "", fmt.Errorf("unrecognized manifest media type %q", mt)
-}
+// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
+// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
+// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
+// it returns a non-nil error only on an unexpected failure.
+func (d *ociImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+	if info.Digest == "" {
+		return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
+	}
+	blobPath, err := d.ref.blobPath(info.Digest)
+	if err != nil {
+		return false, -1, err
+	}
+	finfo, err := os.Stat(blobPath)
+	if err != nil && os.IsNotExist(err) {
+		return false, -1, nil
+	}
+	if err != nil {
+		return false, -1, err
+	}
+	return true, finfo.Size(), nil
+}
+
+func (d *ociImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+	return info, nil
+}

 func (d *ociImageDestination) PutManifest(m []byte) error {
-	// TODO(mitr, runcom): this breaks signatures entirely since at this point we're creating a new manifest
-	// and signatures don't apply anymore. Will fix.
-	ociMan, mt, err := createManifest(m)
-	if err != nil {
-		return err
-	}
-	digest, err := manifest.Digest(ociMan)
+	digest, err := manifest.Digest(m)
 	if err != nil {
 		return err
 	}
 	desc := imgspecv1.Descriptor{}
-	desc.Digest = digest.String()
+	desc.Digest = digest
 	// TODO(runcom): be aware and add support for OCI manifest list
-	desc.MediaType = mt
-	desc.Size = int64(len(ociMan))
+	desc.MediaType = imgspecv1.MediaTypeImageManifest
+	desc.Size = int64(len(m))
 	data, err := json.Marshal(desc)
 	if err != nil {
 		return err
@@ -175,7 +157,10 @@ func (d *ociImageDestination) PutManifest(m []byte) error {
 	if err != nil {
 		return err
 	}
-	if err := ioutil.WriteFile(blobPath, ociMan, 0644); err != nil {
+	if err := ensureParentDirectoryExists(blobPath); err != nil {
+		return err
+	}
+	if err := ioutil.WriteFile(blobPath, m, 0644); err != nil {
 		return err
 	}
 	// TODO(runcom): ugly here?
@@ -205,7 +190,7 @@ func ensureParentDirectoryExists(path string) error {

 func (d *ociImageDestination) PutSignatures(signatures [][]byte) error {
 	if len(signatures) != 0 {
-		return fmt.Errorf("Pushing signatures for OCI images is not supported")
+		return errors.Errorf("Pushing signatures for OCI images is not supported")
 	}
 	return nil
 }
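Note: PutBlob streams the blob to disk while hashing it in a single pass; io.TeeReader copies every byte read by io.Copy into the digester. The same pattern in isolation, using go-digest's Canonical.Digester(), which is the API this diff switches to:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/opencontainers/go-digest"
)

func main() {
	stream := strings.NewReader("example blob contents")

	digester := digest.Canonical.Digester()
	// Everything io.Copy reads from tee is also written into the hash.
	tee := io.TeeReader(stream, digester.Hash())

	size, err := io.Copy(os.Stdout, tee) // os.Stdout stands in for the blob file
	if err != nil {
		panic(err)
	}
	fmt.Printf("\nsize=%d digest=%s\n", size, digester.Digest())
}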
vendor/github.com/containers/image/oci/layout/oci_src.go (generated, vendored; 14 changes)
@@ -6,9 +6,8 @@ import (
 	"io/ioutil"
 	"os"

-	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
-	"github.com/docker/distribution/digest"
+	"github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 )

@@ -27,7 +26,8 @@ func (s *ociImageSource) Reference() types.ImageReference {
 }

 // Close removes resources associated with an initialized ImageSource, if any.
-func (s *ociImageSource) Close() {
+func (s *ociImageSource) Close() error {
+	return nil
 }

 // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
@@ -54,7 +54,7 @@ func (s *ociImageSource) GetManifest() ([]byte, string, error) {
 		return nil, "", err
 	}

-	return m, manifest.GuessMIMEType(m), nil
+	return m, desc.MediaType, nil
 }

 func (s *ociImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
@@ -68,7 +68,11 @@ func (s *ociImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string
 		return nil, "", err
 	}

-	return m, manifest.GuessMIMEType(m), nil
+	// XXX: GetTargetManifest means that we don't have the context of what
+	// mediaType the manifest has. In OCI this means that we don't know
+	// what reference it came from, so we just *assume* that it's
+	// MediaTypeImageManifest.
+	return m, imgspecv1.MediaTypeImageManifest, nil
 }

 // GetBlob returns a stream for the specified blob, and the blob's size.
vendor/github.com/containers/image/oci/layout/oci_transport.go (generated, vendored; 25 changes)
@@ -1,7 +1,6 @@
 package layout

 import (
-	"errors"
 	"fmt"
 	"path/filepath"
 	"regexp"
@@ -10,10 +9,16 @@ import (
 	"github.com/containers/image/directory/explicitfilepath"
 	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/image"
+	"github.com/containers/image/transports"
 	"github.com/containers/image/types"
-	"github.com/docker/distribution/digest"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
 )

+func init() {
+	transports.Register(Transport)
+}
+
 // Transport is an ImageTransport for OCI directories.
 var Transport = ociTransport{}

@@ -43,16 +48,16 @@ func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
 		dir = scope[:sep]
 		tag := scope[sep+1:]
 		if !refRegexp.MatchString(tag) {
-			return fmt.Errorf("Invalid tag %s", tag)
+			return errors.Errorf("Invalid tag %s", tag)
 		}
 	}

 	if strings.Contains(dir, ":") {
-		return fmt.Errorf("Invalid OCI reference %s: path contains a colon", scope)
+		return errors.Errorf("Invalid OCI reference %s: path contains a colon", scope)
 	}

 	if !strings.HasPrefix(dir, "/") {
-		return fmt.Errorf("Invalid scope %s: must be an absolute path", scope)
+		return errors.Errorf("Invalid scope %s: must be an absolute path", scope)
 	}
 	// Refuse also "/", otherwise "/" and "" would have the same semantics,
 	// and "" could be unexpectedly shadowed by the "/" entry.
@@ -62,7 +67,7 @@ func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
 	}
 	cleaned := filepath.Clean(dir)
 	if cleaned != dir {
-		return fmt.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+		return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
 	}
 	return nil
 }
@@ -106,10 +111,10 @@ func NewReference(dir, tag string) (types.ImageReference, error) {
 	// This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces
 	// from being ambiguous with values of PolicyConfigurationIdentity.
 	if strings.Contains(resolved, ":") {
-		return nil, fmt.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", dir, tag, resolved)
+		return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", dir, tag, resolved)
 	}
 	if !refRegexp.MatchString(tag) {
-		return nil, fmt.Errorf("Invalid tag %s", tag)
+		return nil, errors.Errorf("Invalid tag %s", tag)
 	}
 	return ociReference{dir: dir, resolvedDir: resolved, tag: tag}, nil
 }
@@ -191,7 +196,7 @@ func (ref ociReference) NewImageDestination(ctx *types.SystemContext) (types.Ima

 // DeleteImage deletes the named image from the registry, if supported.
 func (ref ociReference) DeleteImage(ctx *types.SystemContext) error {
-	return fmt.Errorf("Deleting images not implemented for oci: images")
+	return errors.Errorf("Deleting images not implemented for oci: images")
 }

 // ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions.
@@ -202,7 +207,7 @@ func (ref ociReference) ociLayoutPath() string {
 // blobPath returns a path for a blob within a directory using OCI image-layout conventions.
 func (ref ociReference) blobPath(digest digest.Digest) (string, error) {
 	if err := digest.Validate(); err != nil {
-		return "", fmt.Errorf("unexpected digest reference %s: %v", digest, err)
+		return "", errors.Wrapf(err, "unexpected digest reference %s", digest)
 	}
 	return filepath.Join(ref.dir, "blobs", digest.Algorithm().String(), digest.Hex()), nil
 }
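Note: blobPath encodes the OCI image-layout convention that blobs live at blobs/<algorithm>/<hex> under the layout root. A small sketch using go-digest directly (the root path and helper name are ours):

package main

import (
	"fmt"
	"path/filepath"

	"github.com/opencontainers/go-digest"
)

// blobPath mirrors the layout rule: reject malformed digests, then split
// the digest into its algorithm and hex parts to build the path.
func blobPath(root string, d digest.Digest) (string, error) {
	if err := d.Validate(); err != nil {
		return "", err
	}
	return filepath.Join(root, "blobs", d.Algorithm().String(), d.Hex()), nil
}

func main() {
	d := digest.FromString("hello")
	p, _ := blobPath("/tmp/layout", d)
	fmt.Println(p) // e.g. /tmp/layout/blobs/sha256/2cf2...
}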
vendor/github.com/containers/image/openshift/openshift-copies.go (generated, vendored; 155 changes)
@@ -4,7 +4,6 @@ import (
 	"crypto/tls"
 	"crypto/x509"
 	"encoding/json"
-	"errors"
 	"fmt"
 	"io/ioutil"
 	"net"
@@ -14,13 +13,14 @@ import (
 	"path"
 	"path/filepath"
 	"reflect"
 	"strings"
+	"time"

 	"github.com/ghodss/yaml"
 	"github.com/imdario/mergo"
-	utilerrors "k8s.io/kubernetes/pkg/util/errors"
-	"k8s.io/kubernetes/pkg/util/homedir"
-	utilnet "k8s.io/kubernetes/pkg/util/net"
+	"github.com/pkg/errors"
+	"golang.org/x/net/http2"
+	"k8s.io/client-go/util/homedir"
 )

 // restTLSClientConfig is a modified copy of k8s.io/kubernetes/pkg/client/restclient.TLSClientConfig.
@@ -348,20 +348,20 @@ func validateClusterInfo(clusterName string, clusterInfo clientcmdCluster) []err

 	if len(clusterInfo.Server) == 0 {
 		if len(clusterName) == 0 {
-			validationErrors = append(validationErrors, fmt.Errorf("default cluster has no server defined"))
+			validationErrors = append(validationErrors, errors.Errorf("default cluster has no server defined"))
 		} else {
-			validationErrors = append(validationErrors, fmt.Errorf("no server found for cluster %q", clusterName))
+			validationErrors = append(validationErrors, errors.Errorf("no server found for cluster %q", clusterName))
 		}
 	}
 	// Make sure CA data and CA file aren't both specified
 	if len(clusterInfo.CertificateAuthority) != 0 && len(clusterInfo.CertificateAuthorityData) != 0 {
-		validationErrors = append(validationErrors, fmt.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName))
+		validationErrors = append(validationErrors, errors.Errorf("certificate-authority-data and certificate-authority are both specified for %v. certificate-authority-data will override", clusterName))
 	}
 	if len(clusterInfo.CertificateAuthority) != 0 {
 		clientCertCA, err := os.Open(clusterInfo.CertificateAuthority)
 		defer clientCertCA.Close()
 		if err != nil {
-			validationErrors = append(validationErrors, fmt.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
+			validationErrors = append(validationErrors, errors.Errorf("unable to read certificate-authority %v for %v due to %v", clusterInfo.CertificateAuthority, clusterName, err))
 		}
 	}

@@ -385,36 +385,36 @@ func validateAuthInfo(authInfoName string, authInfo clientcmdAuthInfo) []error {
 	if len(authInfo.ClientCertificate) != 0 || len(authInfo.ClientCertificateData) != 0 {
 		// Make sure cert data and file aren't both specified
 		if len(authInfo.ClientCertificate) != 0 && len(authInfo.ClientCertificateData) != 0 {
-			validationErrors = append(validationErrors, fmt.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName))
+			validationErrors = append(validationErrors, errors.Errorf("client-cert-data and client-cert are both specified for %v. client-cert-data will override", authInfoName))
 		}
 		// Make sure key data and file aren't both specified
 		if len(authInfo.ClientKey) != 0 && len(authInfo.ClientKeyData) != 0 {
-			validationErrors = append(validationErrors, fmt.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
+			validationErrors = append(validationErrors, errors.Errorf("client-key-data and client-key are both specified for %v; client-key-data will override", authInfoName))
 		}
 		// Make sure a key is specified
 		if len(authInfo.ClientKey) == 0 && len(authInfo.ClientKeyData) == 0 {
-			validationErrors = append(validationErrors, fmt.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName))
+			validationErrors = append(validationErrors, errors.Errorf("client-key-data or client-key must be specified for %v to use the clientCert authentication method", authInfoName))
 		}

 		if len(authInfo.ClientCertificate) != 0 {
 			clientCertFile, err := os.Open(authInfo.ClientCertificate)
 			defer clientCertFile.Close()
 			if err != nil {
-				validationErrors = append(validationErrors, fmt.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
+				validationErrors = append(validationErrors, errors.Errorf("unable to read client-cert %v for %v due to %v", authInfo.ClientCertificate, authInfoName, err))
 			}
 		}
 		if len(authInfo.ClientKey) != 0 {
 			clientKeyFile, err := os.Open(authInfo.ClientKey)
 			defer clientKeyFile.Close()
 			if err != nil {
-				validationErrors = append(validationErrors, fmt.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
+				validationErrors = append(validationErrors, errors.Errorf("unable to read client-key %v for %v due to %v", authInfo.ClientKey, authInfoName, err))
 			}
 		}
 	}

 	// authPath also provides information for the client to identify the server, so allow multiple auth methods in that case
 	if (len(methods) > 1) && (!usingAuthPath) {
-		validationErrors = append(validationErrors, fmt.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
+		validationErrors = append(validationErrors, errors.Errorf("more than one authentication method found for %v; found %v, only one is allowed", authInfoName, methods))
 	}

 	return validationErrors
@@ -450,6 +450,55 @@ func (config *directClientConfig) getCluster() clientcmdCluster {
 	return mergedClusterInfo
 }

+// aggregateErr is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.
+// This helper implements the error and Errors interfaces. Keeping it private
+// prevents people from making an aggregate of 0 errors, which is not
+// an error, but does satisfy the error interface.
+type aggregateErr []error
+
+// newAggregate is a modified copy of k8s.io/apimachinery/pkg/util/errors.NewAggregate.
+// NewAggregate converts a slice of errors into an Aggregate interface, which
+// is itself an implementation of the error interface. If the slice is empty,
+// this returns nil.
+// It will check if any of the element of input error list is nil, to avoid
+// nil pointer panic when call Error().
+func newAggregate(errlist []error) error {
+	if len(errlist) == 0 {
+		return nil
+	}
+	// In case of input error list contains nil
+	var errs []error
+	for _, e := range errlist {
+		if e != nil {
+			errs = append(errs, e)
+		}
+	}
+	if len(errs) == 0 {
+		return nil
+	}
+	return aggregateErr(errs)
+}
+
+// Error is a modified copy of k8s.io/apimachinery/pkg/util/errors.aggregate.Error.
+// Error is part of the error interface.
+func (agg aggregateErr) Error() string {
+	if len(agg) == 0 {
+		// This should never happen, really.
+		return ""
+	}
+	if len(agg) == 1 {
+		return agg[0].Error()
+	}
+	result := fmt.Sprintf("[%s", agg[0].Error())
+	for i := 1; i < len(agg); i++ {
+		result += fmt.Sprintf(", %s", agg[i].Error())
+	}
+	result += "]"
+	return result
+}
+
+// REMOVED: aggregateErr.Errors
+
 // errConfigurationInvalid is a modified? copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.errConfigurationInvalid.
 // errConfigurationInvalid is a set of errors indicating the configuration is invalid.
 type errConfigurationInvalid []error
@@ -470,7 +519,7 @@ func newErrConfigurationInvalid(errs []error) error {

 // Error implements the error interface
 func (e errConfigurationInvalid) Error() string {
-	return fmt.Sprintf("invalid configuration: %v", utilerrors.NewAggregate(e).Error())
+	return fmt.Sprintf("invalid configuration: %v", newAggregate(e).Error())
 }

 // clientConfigLoadingRules is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.ClientConfigLoadingRules
@@ -518,7 +567,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
 			continue
 		}
 		if err != nil {
-			errlist = append(errlist, fmt.Errorf("Error loading config file \"%s\": %v", filename, err))
+			errlist = append(errlist, errors.Wrapf(err, "Error loading config file \"%s\"", filename))
 			continue
 		}

@@ -550,7 +599,7 @@ func (rules *clientConfigLoadingRules) Load() (*clientcmdConfig, error) {
 		errlist = append(errlist, err)
 	}

-	return config, utilerrors.NewAggregate(errlist)
+	return config, newAggregate(errlist)
 }

 // loadFromFile is a modified copy of k8s.io/kubernetes/pkg/client/unversioned/clientcmd.LoadFromFile
@@ -623,7 +672,7 @@ func resolveLocalPaths(config *clientcmdConfig) error {
 		}
 		base, err := filepath.Abs(filepath.Dir(cluster.LocationOfOrigin))
 		if err != nil {
-			return fmt.Errorf("Could not determine the absolute path of config file %s: %v", cluster.LocationOfOrigin, err)
+			return errors.Wrapf(err, "Could not determine the absolute path of config file %s", cluster.LocationOfOrigin)
 		}

 		if err := resolvePaths(getClusterFileReferences(cluster), base); err != nil {
@@ -636,7 +685,7 @@ func resolveLocalPaths(config *clientcmdConfig) error {
 		}
 		base, err := filepath.Abs(filepath.Dir(authInfo.LocationOfOrigin))
 		if err != nil {
-			return fmt.Errorf("Could not determine the absolute path of config file %s: %v", authInfo.LocationOfOrigin, err)
+			return errors.Wrapf(err, "Could not determine the absolute path of config file %s", authInfo.LocationOfOrigin)
 		}

 		if err := resolvePaths(getAuthInfoFileReferences(authInfo), base); err != nil {
@@ -706,7 +755,7 @@ func restClientFor(config *restConfig) (*url.URL, *http.Client, error) {
 // Kubernetes API.
 func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
 	if host == "" {
-		return nil, fmt.Errorf("host must be a URL or a host:port pair")
+		return nil, errors.Errorf("host must be a URL or a host:port pair")
 	}
 	base := host
 	hostURL, err := url.Parse(base)
@@ -723,7 +772,7 @@ func defaultServerURL(host string, defaultTLS bool) (*url.URL, error) {
 		return nil, err
 	}
 	if hostURL.Path != "" && hostURL.Path != "/" {
-		return nil, fmt.Errorf("host must be a URL or a host:port pair: %q", base)
+		return nil, errors.Errorf("host must be a URL or a host:port pair: %q", base)
 	}
 }

@@ -793,12 +842,58 @@ func transportNew(config *restConfig) (http.RoundTripper, error) {

 	// REMOVED: HTTPWrappersForConfig(config, rt) in favor of the caller setting HTTP headers itself based on restConfig. Only this inlined check remains.
 	if len(config.Username) != 0 && len(config.BearerToken) != 0 {
-		return nil, fmt.Errorf("username/password or bearer token may be set, but not both")
+		return nil, errors.Errorf("username/password or bearer token may be set, but not both")
 	}

 	return rt, nil
 }

+// newProxierWithNoProxyCIDR is a modified copy of k8s.io/apimachinery/pkg/util/net.NewProxierWithNoProxyCIDR.
+// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if
+// no matching CIDRs are found
+func newProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) {
+	// we wrap the default method, so we only need to perform our check if the NO_PROXY envvar has a CIDR in it
+	noProxyEnv := os.Getenv("NO_PROXY")
+	noProxyRules := strings.Split(noProxyEnv, ",")
+
+	cidrs := []*net.IPNet{}
+	for _, noProxyRule := range noProxyRules {
+		_, cidr, _ := net.ParseCIDR(noProxyRule)
+		if cidr != nil {
+			cidrs = append(cidrs, cidr)
+		}
+	}
+
+	if len(cidrs) == 0 {
+		return delegate
+	}
+
+	return func(req *http.Request) (*url.URL, error) {
+		host := req.URL.Host
+		// for some urls, the Host is already the host, not the host:port
+		if net.ParseIP(host) == nil {
+			var err error
+			host, _, err = net.SplitHostPort(req.URL.Host)
+			if err != nil {
+				return delegate(req)
+			}
+		}
+
+		ip := net.ParseIP(host)
+		if ip == nil {
+			return delegate(req)
+		}
+
+		for _, cidr := range cidrs {
+			if cidr.Contains(ip) {
+				return nil, nil
+			}
+		}
+
+		return delegate(req)
+	}
+}
+
 // tlsCacheGet is a modified copy of k8s.io/kubernetes/pkg/client/transport.tlsTransportCache.get.
 func tlsCacheGet(config *restConfig) (http.RoundTripper, error) {
 	// REMOVED: any actual caching
@@ -813,15 +908,23 @@ func tlsCacheGet(config *restConfig) (http.RoundTripper, error) {
 		return http.DefaultTransport, nil
 	}

-	return utilnet.SetTransportDefaults(&http.Transport{ // FIXME??
-		Proxy:               http.ProxyFromEnvironment,
+	// REMOVED: Call to k8s.io/apimachinery/pkg/util/net.SetTransportDefaults; instead of the generic machinery and conditionals, hard-coded the result here.
+	t := &http.Transport{
+		// http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings
+		// ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY
+		Proxy:               newProxierWithNoProxyCIDR(http.ProxyFromEnvironment),
 		TLSHandshakeTimeout: 10 * time.Second,
 		TLSClientConfig:     tlsConfig,
 		Dial: (&net.Dialer{
 			Timeout:   30 * time.Second,
 			KeepAlive: 30 * time.Second,
 		}).Dial,
-	}), nil
+	}
+	// Allow clients to disable http2 if needed.
+	if s := os.Getenv("DISABLE_HTTP2"); len(s) == 0 {
+		_ = http2.ConfigureTransport(t)
+	}
+	return t, nil
 }

 // tlsConfigFor is a modified copy of k8s.io/kubernetes/pkg/client/transport.TLSConfigFor.
@@ -832,7 +935,7 @@ func tlsConfigFor(c *restConfig) (*tls.Config, error) {
 		return nil, nil
 	}
 	if c.HasCA() && c.Insecure {
-		return nil, fmt.Errorf("specifying a root certificates file with the insecure flag is not allowed")
+		return nil, errors.Errorf("specifying a root certificates file with the insecure flag is not allowed")
 	}
 	if err := loadTLSFiles(c); err != nil {
 		return nil, err
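Note: newProxierWithNoProxyCIDR exists because http.ProxyFromEnvironment matches NO_PROXY entries as host suffixes only; CIDR entries like 10.0.0.0/8 need the explicit IP containment check seen above. The decision logic on its own (function name is ours):

package main

import (
	"fmt"
	"net"
	"strings"
)

// shouldBypassProxy reports whether host falls inside any CIDR listed in
// noProxy (comma-separated; non-CIDR entries are ignored), i.e. whether the
// wrapped proxier should answer "no proxy" for it.
func shouldBypassProxy(noProxy, host string) bool {
	ip := net.ParseIP(host)
	if ip == nil {
		return false // not a bare IP; fall back to the default rules
	}
	for _, rule := range strings.Split(noProxy, ",") {
		if _, cidr, err := net.ParseCIDR(strings.TrimSpace(rule)); err == nil && cidr.Contains(ip) {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(shouldBypassProxy("10.0.0.0/8,.example.com", "10.1.2.3")) // true
	fmt.Println(shouldBypassProxy("10.0.0.0/8", "192.168.1.1"))          // false
}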
vendor/github.com/containers/image/openshift/openshift.go (generated, vendored; 43 changes)
@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"crypto/rand"
 	"encoding/json"
-	"errors"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -14,10 +13,12 @@ import (

 	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/docker"
+	"github.com/containers/image/docker/reference"
 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
 	"github.com/containers/image/version"
-	"github.com/docker/distribution/digest"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
 )

 // openshiftClient is configuration for dealing with a single image stream, for reading or writing.
@@ -124,7 +125,7 @@ func (c *openshiftClient) doRequest(method, path string, requestBody []byte) ([]
 		if statusValid {
 			return nil, errors.New(status.Message)
 		}
-		return nil, fmt.Errorf("HTTP error: status code: %d, body: %s", res.StatusCode, string(body))
+		return nil, errors.Errorf("HTTP error: status code: %d, body: %s", res.StatusCode, string(body))
 	}

 	return body, nil
@@ -151,9 +152,9 @@ func (c *openshiftClient) getImage(imageStreamImageName string) (*image, error)
 func (c *openshiftClient) convertDockerImageReference(ref string) (string, error) {
 	parts := strings.SplitN(ref, "/", 2)
 	if len(parts) != 2 {
-		return "", fmt.Errorf("Invalid format of docker reference %s: missing '/'", ref)
+		return "", errors.Errorf("Invalid format of docker reference %s: missing '/'", ref)
 	}
-	return c.ref.dockerReference.Hostname() + "/" + parts[1], nil
+	return reference.Domain(c.ref.dockerReference) + "/" + parts[1], nil
 }

 type openshiftImageSource struct {
@@ -190,11 +191,15 @@ func (s *openshiftImageSource) Reference() types.ImageReference {
 }

 // Close removes resources associated with an initialized ImageSource, if any.
-func (s *openshiftImageSource) Close() {
+func (s *openshiftImageSource) Close() error {
 	if s.docker != nil {
-		s.docker.Close()
+		err := s.docker.Close()
 		s.docker = nil
+
+		return err
 	}
+
+	return nil
 }

 func (s *openshiftImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
@@ -267,7 +272,7 @@ func (s *openshiftImageSource) ensureImageIsResolved() error {
 		}
 	}
 	if te == nil {
-		return fmt.Errorf("No matching tag found")
+		return errors.Errorf("No matching tag found")
 	}
 	logrus.Debugf("tag event %#v", te)
 	dockerRefString, err := s.client.convertDockerImageReference(te.DockerImageReference)
@@ -305,7 +310,7 @@ func newImageDestination(ctx *types.SystemContext, ref openshiftReference) (type
 	// FIXME: Should this always use a digest, not a tag? Uploading to Docker by tag requires the tag _inside_ the manifest to match,
 	// i.e. a single signed image cannot be available under multiple tags. But with types.ImageDestination, we don't know
 	// the manifest digest at this point.
-	dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", client.ref.dockerReference.Hostname(), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag())
+	dockerRefString := fmt.Sprintf("//%s/%s/%s:%s", reference.Domain(client.ref.dockerReference), client.ref.namespace, client.ref.stream, client.ref.dockerReference.Tag())
 	dockerRef, err := docker.ParseReference(dockerRefString)
 	if err != nil {
 		return nil, err
@@ -328,8 +333,8 @@ func (d *openshiftImageDestination) Reference() types.ImageReference {
 }

 // Close removes resources associated with an initialized ImageDestination, if any.
-func (d *openshiftImageDestination) Close() {
-	d.docker.Close()
+func (d *openshiftImageDestination) Close() error {
+	return d.docker.Close()
 }

 func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string {
@@ -366,6 +371,18 @@ func (d *openshiftImageDestination) PutBlob(stream io.Reader, inputInfo types.Bl
 	return d.docker.PutBlob(stream, inputInfo)
 }

+// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
+// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
+// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
+// it returns a non-nil error only on an unexpected failure.
+func (d *openshiftImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+	return d.docker.HasBlob(info)
+}
+
+func (d *openshiftImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+	return d.docker.ReapplyBlob(info)
+}
+
 func (d *openshiftImageDestination) PutManifest(m []byte) error {
 	manifestDigest, err := manifest.Digest(m)
 	if err != nil {
@@ -378,7 +395,7 @@ func (d *openshiftImageDestination) PutManifest(m []byte) error {

 func (d *openshiftImageDestination) PutSignatures(signatures [][]byte) error {
 	if d.imageStreamImageName == "" {
-		return fmt.Errorf("Internal error: Unknown manifest digest, can't add signatures")
+		return errors.Errorf("Internal error: Unknown manifest digest, can't add signatures")
 	}
 	// Because image signatures are a shared resource in Atomic Registry, the default upload
 	// always adds signatures. Eventually we should also allow removing signatures.
@@ -410,7 +427,7 @@ sigExists:
 		randBytes := make([]byte, 16)
 		n, err := rand.Read(randBytes)
 		if err != nil || n != 16 {
-			return fmt.Errorf("Error generating random signature ID: %v, len %d", err, n)
+			return errors.Wrapf(err, "Error generating random signature len %d", n)
 		}
 		signatureName = fmt.Sprintf("%s@%032x", d.imageStreamImageName, randBytes)
 		if _, ok := existingSigNames[signatureName]; !ok {
23 vendor/github.com/containers/image/openshift/openshift_transport.go generated vendored
@@ -8,9 +8,15 @@ import (
	"github.com/containers/image/docker/policyconfiguration"
	"github.com/containers/image/docker/reference"
	genericImage "github.com/containers/image/image"
	"github.com/containers/image/transports"
	"github.com/containers/image/types"
	"github.com/pkg/errors"
)

func init() {
	transports.Register(Transport)
}

// Transport is an ImageTransport for OpenShift registry-hosted images.
var Transport = openshiftTransport{}

@@ -36,7 +42,7 @@ var scopeRegexp = regexp.MustCompile("^[^/]*(/[^:/]*(/[^:/]*(:[^:/]*)?)?)?$")
// scope passed to this function will not be "", that value is always allowed.
func (t openshiftTransport) ValidatePolicyConfigurationScope(scope string) error {
	if scopeRegexp.FindStringIndex(scope) == nil {
		return fmt.Errorf("Invalid scope name %s", scope)
		return errors.Errorf("Invalid scope name %s", scope)
	}
	return nil
}
@@ -50,22 +56,23 @@ type openshiftReference struct {

// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OpenShift ImageReference.
func ParseReference(ref string) (types.ImageReference, error) {
	r, err := reference.ParseNamed(ref)
	r, err := reference.ParseNormalizedNamed(ref)
	if err != nil {
		return nil, fmt.Errorf("failed to parse image reference %q, %v", ref, err)
		return nil, errors.Wrapf(err, "failed to parse image reference %q", ref)
	}
	tagged, ok := r.(reference.NamedTagged)
	if !ok {
		return nil, fmt.Errorf("invalid image reference %s, %#v", ref, r)
		return nil, errors.Errorf("invalid image reference %s, expected format: 'hostname/namespace/stream:tag'", ref)
	}
	return NewReference(tagged)
}

// NewReference returns an OpenShift reference for a reference.NamedTagged
func NewReference(dockerRef reference.NamedTagged) (types.ImageReference, error) {
	r := strings.SplitN(dockerRef.RemoteName(), "/", 3)
	r := strings.SplitN(reference.Path(dockerRef), "/", 3)
	if len(r) != 2 {
		return nil, fmt.Errorf("invalid image reference %s", dockerRef.String())
		return nil, errors.Errorf("invalid image reference: %s, expected format: 'hostname/namespace/stream:tag'",
			reference.FamiliarString(dockerRef))
	}
	return openshiftReference{
		namespace: r[0],
@@ -84,7 +91,7 @@ func (ref openshiftReference) Transport() types.ImageTransport {
// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref openshiftReference) StringWithinTransport() string {
	return ref.dockerReference.String()
	return reference.FamiliarString(ref.dockerReference)
}

// DockerReference returns a Docker reference associated with this reference
@@ -146,5 +153,5 @@ func (ref openshiftReference) NewImageDestination(ctx *types.SystemContext) (typ

// DeleteImage deletes the named image from the registry, if supported.
func (ref openshiftReference) DeleteImage(ctx *types.SystemContext) error {
	return fmt.Errorf("Deleting images not implemented for atomic: images")
	return errors.Errorf("Deleting images not implemented for atomic: images")
}

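The reworked ParseReference above normalizes its input before requiring a tag, so callers must supply the full 'hostname/namespace/stream:tag' shape. A minimal usage sketch (the import path follows the vendored package; the registry host, project, and stream names are hypothetical):

package main

import (
	"fmt"

	"github.com/containers/image/openshift"
)

func main() {
	// Hypothetical OpenShift registry host, project (namespace), and image stream.
	ref, err := openshift.ParseReference("registry.example.com:5000/myproject/mystream:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.Transport().Name())      // "atomic"
	fmt.Println(ref.StringWithinTransport()) // the familiar form of the Docker reference
}
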
@@ -1,41 +1,47 @@
package copy
package compression

import (
	"bytes"
	"compress/bzip2"
	"compress/gzip"
	"errors"
	"io"

	"github.com/pkg/errors"

	"github.com/Sirupsen/logrus"
)

// decompressorFunc, given a compressed stream, returns the decompressed stream.
type decompressorFunc func(io.Reader) (io.Reader, error)
// DecompressorFunc returns the decompressed stream, given a compressed stream.
type DecompressorFunc func(io.Reader) (io.Reader, error)

func gzipDecompressor(r io.Reader) (io.Reader, error) {
// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm.
func GzipDecompressor(r io.Reader) (io.Reader, error) {
	return gzip.NewReader(r)
}
func bzip2Decompressor(r io.Reader) (io.Reader, error) {

// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
func Bzip2Decompressor(r io.Reader) (io.Reader, error) {
	return bzip2.NewReader(r), nil
}
func xzDecompressor(r io.Reader) (io.Reader, error) {

// XzDecompressor is a DecompressorFunc for the xz compression algorithm.
func XzDecompressor(r io.Reader) (io.Reader, error) {
	return nil, errors.New("Decompressing xz streams is not supported")
}

// compressionAlgos is an internal implementation detail of detectCompression
// compressionAlgos is an internal implementation detail of DetectCompression
var compressionAlgos = map[string]struct {
	prefix       []byte
	decompressor decompressorFunc
	decompressor DecompressorFunc
}{
	"gzip":  {[]byte{0x1F, 0x8B, 0x08}, gzipDecompressor},                 // gzip (RFC 1952)
	"bzip2": {[]byte{0x42, 0x5A, 0x68}, bzip2Decompressor},                // bzip2 (decompress.c:BZ2_decompress)
	"xz":    {[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, xzDecompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt)
	"gzip":  {[]byte{0x1F, 0x8B, 0x08}, GzipDecompressor},                 // gzip (RFC 1952)
	"bzip2": {[]byte{0x42, 0x5A, 0x68}, Bzip2Decompressor},                // bzip2 (decompress.c:BZ2_decompress)
	"xz":    {[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt)
}

// detectCompression returns a decompressorFunc if the input is recognized as a compressed format, nil otherwise.
// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise.
// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
func detectCompression(input io.Reader) (decompressorFunc, io.Reader, error) {
func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
	buffer := [8]byte{}

	n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
@@ -45,7 +51,7 @@ func detectCompression(input io.Reader) (decompressorFunc, io.Reader, error) {
		return nil, nil, err
	}

	var decompressor decompressorFunc
	var decompressor DecompressorFunc
	for name, algo := range compressionAlgos {
		if bytes.HasPrefix(buffer[:n], algo.prefix) {
			logrus.Debugf("Detected compression format %s", name)
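Because DetectCompression consumes the first bytes of its input, callers must keep reading from the io.Reader it returns rather than the original stream. A rough usage sketch, assuming the vendored import path github.com/containers/image/pkg/compression:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"

	"github.com/containers/image/pkg/compression"
)

func main() {
	// Build a gzip-compressed stream in memory.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("example blob contents"))
	zw.Close()

	decompressor, rest, err := compression.DetectCompression(&buf)
	if err != nil {
		panic(err)
	}
	if decompressor == nil {
		fmt.Println("stream is not compressed")
		return
	}
	r, err := decompressor(rest) // read from rest, not buf: the prefix was already consumed
	if err != nil {
		panic(err)
	}
	data, _ := ioutil.ReadAll(r)
	fmt.Printf("%s\n", data)
}
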
1 vendor/github.com/containers/image/pkg/strslice/README.md generated vendored Normal file
@@ -0,0 +1 @@
This package was replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice).
20 vendor/github.com/containers/image/signature/docker.go generated vendored
@@ -5,8 +5,9 @@ package signature
import (
	"fmt"

	"github.com/containers/image/docker/reference"
	"github.com/containers/image/manifest"
	"github.com/docker/distribution/digest"
	"github.com/opencontainers/go-digest"
)

// SignDockerManifest returns a signature for manifest as the specified dockerReference,
@@ -16,12 +17,7 @@ func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism,
	if err != nil {
		return nil, err
	}
	sig := privateSignature{
		Signature{
			DockerManifestDigest: manifestDigest,
			DockerReference:      dockerReference,
		},
	}
	sig := newUntrustedSignature(manifestDigest, dockerReference)
	return sig.sign(mech, keyIdentity)
}

@@ -29,6 +25,10 @@ func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism,
// using mech.
func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte,
	expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) {
	expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference)
	if err != nil {
		return nil, err
	}
	sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{
		validateKeyIdentity: func(keyIdentity string) error {
			if keyIdentity != expectedKeyIdentity {
@@ -37,7 +37,11 @@ func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byt
			return nil
		},
		validateSignedDockerReference: func(signedDockerReference string) error {
			if signedDockerReference != expectedDockerReference {
			signedRef, err := reference.ParseNormalizedNamed(signedDockerReference)
			if err != nil {
				return InvalidSignatureError{msg: fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)}
			}
			if signedRef.String() != expectedRef.String() {
				return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s",
					signedDockerReference, expectedDockerReference)}
			}

63 vendor/github.com/containers/image/signature/json.go generated vendored
@@ -14,47 +14,6 @@ func (err jsonFormatError) Error() string {
	return string(err)
}

// validateExactMapKeys returns an error if the keys of m are not exactly expectedKeys, which must be pairwise distinct
func validateExactMapKeys(m map[string]interface{}, expectedKeys ...string) error {
	if len(m) != len(expectedKeys) {
		return jsonFormatError("Unexpected keys in a JSON object")
	}

	for _, k := range expectedKeys {
		if _, ok := m[k]; !ok {
			return jsonFormatError(fmt.Sprintf("Key %s missing in a JSON object", k))
		}
	}
	// Assuming expectedKeys are pairwise distinct, we know m contains len(expectedKeys) different values in expectedKeys.
	return nil
}

// mapField returns a member fieldName of m, if it is a JSON map, or an error.
func mapField(m map[string]interface{}, fieldName string) (map[string]interface{}, error) {
	untyped, ok := m[fieldName]
	if !ok {
		return nil, jsonFormatError(fmt.Sprintf("Field %s missing", fieldName))
	}
	v, ok := untyped.(map[string]interface{})
	if !ok {
		return nil, jsonFormatError(fmt.Sprintf("Field %s is not a JSON object", fieldName))
	}
	return v, nil
}

// stringField returns a member fieldName of m, if it is a string, or an error.
func stringField(m map[string]interface{}, fieldName string) (string, error) {
	untyped, ok := m[fieldName]
	if !ok {
		return "", jsonFormatError(fmt.Sprintf("Field %s missing", fieldName))
	}
	v, ok := untyped.(string)
	if !ok {
		return "", jsonFormatError(fmt.Sprintf("Field %s is not a JSON object", fieldName))
	}
	return v, nil
}

// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but fails on the slightest unexpected aspect
// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to
// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected.
@@ -105,3 +64,25 @@ func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interfa
	}
	return nil
}

// paranoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, but fails on the slightest unexpected aspect
// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
// must be present exactly once, and no other fields are accepted.
func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error {
	seenKeys := map[string]struct{}{}
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		if valuePtr, ok := exactFields[key]; ok {
			seenKeys[key] = struct{}{}
			return valuePtr
		}
		return nil
	}); err != nil {
		return err
	}
	for key := range exactFields {
		if _, ok := seenKeys[key]; !ok {
			return jsonFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
		}
	}
	return nil
}

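The exact-fields helper added above rejects unknown keys, duplicated keys, type mismatches, and missing fields in one pass. The following standalone sketch reimplements the same idea on top of encoding/json's token stream; it is an illustration of the pattern, not the package's unexported implementation:

package main

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
)

// strictUnmarshalExact is a hypothetical helper: every key in fields must
// appear exactly once, with a value assignable to the supplied pointer.
func strictUnmarshalExact(data []byte, fields map[string]interface{}) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	if t, err := dec.Token(); err != nil || t != json.Delim('{') {
		return errors.New("input is not a JSON object")
	}
	seen := map[string]struct{}{}
	for dec.More() {
		keyTok, err := dec.Token()
		if err != nil {
			return err
		}
		key := keyTok.(string) // inside an object, keys are always strings
		if _, dup := seen[key]; dup {
			return fmt.Errorf("duplicated key %q", key)
		}
		dest, ok := fields[key]
		if !ok {
			return fmt.Errorf("unknown key %q", key)
		}
		seen[key] = struct{}{}
		if err := dec.Decode(dest); err != nil {
			return err // also catches non-matching value types
		}
	}
	for key := range fields {
		if _, ok := seen[key]; !ok {
			return fmt.Errorf("key %q missing", key)
		}
	}
	return nil
}

func main() {
	var typ string
	err := strictUnmarshalExact([]byte(`{"type": "reject"}`), map[string]interface{}{"type": &typ})
	fmt.Println(typ, err) // "reject <nil>"
}
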
141 vendor/github.com/containers/image/signature/mechanism.go generated vendored
@@ -4,121 +4,82 @@ package signature

import (
	"bytes"
	"errors"
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/mtrmac/gpgme"
	"golang.org/x/crypto/openpgp"
)

// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
// Each mechanism should eventually be closed by calling Close().
// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
// eliminate ambiguities, support CA signatures and perhaps other key properties)
type SigningMechanism interface {
	// ImportKeysFromBytes imports public keys from the supplied blob and returns their identities.
	// The blob is assumed to have an appropriate format (the caller is expected to know which one).
	// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism).
	ImportKeysFromBytes(blob []byte) ([]string, error)
	// Sign creates a (non-detached) signature of input using keyidentity
	// Close removes resources associated with the mechanism, if any.
	Close() error
	// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError.
	SupportsSigning() error
	// Sign creates a (non-detached) signature of input using keyIdentity.
	// Fails with a SigningNotSupportedError if the mechanism does not support signing.
	Sign(input []byte, keyIdentity string) ([]byte, error)
	// Verify parses unverifiedSignature and returns the content and the signer's identity
	Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error)
	// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
	// along with a short identifier of the key used for signing.
	// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
	// is NOT the same as a "key identity" used in other calls to this interface, and
	// the values may have no recognizable relationship if the public key is not available.
	UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error)
}

// A GPG/OpenPGP signing mechanism.
type gpgSigningMechanism struct {
	ctx *gpgme.Context
// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that.
type SigningNotSupportedError string

func (err SigningNotSupportedError) Error() string {
	return string(err)
}

// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism.
// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism for the user’s default
// GPG configuration ($GNUPGHOME / ~/.gnupg)
// The caller must call .Close() on the returned SigningMechanism.
func NewGPGSigningMechanism() (SigningMechanism, error) {
	return newGPGSigningMechanismInDirectory("")
}

// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
	ctx, err := gpgme.New()
	if err != nil {
		return nil, err
	}
	if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil {
		return nil, err
	}
	if optionalDir != "" {
		err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir)
		if err != nil {
			return nil, err
		}
	}
	ctx.SetArmor(false)
	ctx.SetTextMode(false)
	return gpgSigningMechanism{ctx: ctx}, nil
// NewEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
// recognizes _only_ public keys from the supplied blob, and returns the identities
// of these keys.
// The caller must call .Close() on the returned SigningMechanism.
func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
	return newEphemeralGPGSigningMechanism(blob)
}

// ImportKeysFromBytes implements SigningMechanism.ImportKeysFromBytes
func (m gpgSigningMechanism) ImportKeysFromBytes(blob []byte) ([]string, error) {
	inputData, err := gpgme.NewDataBytes(blob)
	if err != nil {
		return nil, err
	}
	res, err := m.ctx.Import(inputData)
	if err != nil {
		return nil, err
	}
	keyIdentities := []string{}
	for _, i := range res.Imports {
		if i.Result == nil {
			keyIdentities = append(keyIdentities, i.Fingerprint)
		}
	}
	return keyIdentities, nil
}

// Sign implements SigningMechanism.Sign
func (m gpgSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
	key, err := m.ctx.GetKey(keyIdentity, true)
	if err != nil {
		if e, ok := err.(gpgme.Error); ok && e.Code() == gpgme.ErrorEOF {
			return nil, fmt.Errorf("key %q not found", keyIdentity)
		}
		return nil, err
	}
	inputData, err := gpgme.NewDataBytes(input)
	if err != nil {
		return nil, err
	}
	var sigBuffer bytes.Buffer
	sigData, err := gpgme.NewDataWriter(&sigBuffer)
	if err != nil {
		return nil, err
	}
	if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
		return nil, err
	}
	return sigBuffer.Bytes(), nil
}

// Verify implements SigningMechanism.Verify
func (m gpgSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
	signedBuffer := bytes.Buffer{}
	signedData, err := gpgme.NewDataWriter(&signedBuffer)
// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
// along with a short identifier of the key used for signing.
// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
// is NOT the same as a "key identity" used in other calls to this interface, and
// the values may have no recognizable relationship if the public key is not available.
func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
	// This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography.
	md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil)
	if err != nil {
		return nil, "", err
	}
	unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature)
	if !md.IsSigned {
		return nil, "", errors.New("The input is not a signature")
	}
	content, err := ioutil.ReadAll(md.UnverifiedBody)
	if err != nil {
		// Coverage: An error during reading the body can happen only if
		// 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key
		// to decrypt the contents anyway), or
		// 2) the message is signed AND we give ReadMessage a corresponding public key, which we don’t.
		return nil, "", err
	}
	_, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData)
	if err != nil {
		return nil, "", err
	}
	if len(sigs) != 1 {
		return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))}
	}
	sig := sigs[0]
	// This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves
	if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage {
		// FIXME: Better error reporting eventually
		return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)}
	}
	return signedBuffer.Bytes(), sig.Fingerprint, nil

	// Uppercase the key ID for minimal consistency with the gpgme-returned fingerprints
	// (but note that key ID is a suffix of the fingerprint only for V4 keys, not V3)!
	return content, strings.ToUpper(fmt.Sprintf("%016X", md.SignedByKeyId)), nil
}

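With the refactored interface, callers probe for signing support instead of assuming it; under the containers_image_openpgp build tag Sign is unavailable and SupportsSigning reports a SigningNotSupportedError. A rough sketch of the intended call pattern (the key fingerprint and payload are hypothetical):

package main

import (
	"fmt"
	"log"

	"github.com/containers/image/signature"
)

func main() {
	mech, err := signature.NewGPGSigningMechanism() // uses $GNUPGHOME / ~/.gnupg
	if err != nil {
		log.Fatal(err)
	}
	defer mech.Close()

	if err := mech.SupportsSigning(); err != nil {
		log.Fatal(err) // SigningNotSupportedError under containers_image_openpgp
	}

	sig, err := mech.Sign([]byte("payload"), "0123456789ABCDEF") // hypothetical key fingerprint
	if err != nil {
		log.Fatal(err)
	}
	contents, keyIdentity, err := mech.Verify(sig)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("signed by %s: %s\n", keyIdentity, contents)
}
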
175 vendor/github.com/containers/image/signature/mechanism_gpgme.go generated vendored Normal file
@@ -0,0 +1,175 @@
// +build !containers_image_openpgp

package signature

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"

	"github.com/mtrmac/gpgme"
)

// A GPG/OpenPGP signing mechanism, implemented using gpgme.
type gpgmeSigningMechanism struct {
	ctx          *gpgme.Context
	ephemeralDir string // If not "", a directory to be removed on Close()
}

// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
// The caller must call .Close() on the returned SigningMechanism.
func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
	ctx, err := newGPGMEContext(optionalDir)
	if err != nil {
		return nil, err
	}
	return &gpgmeSigningMechanism{
		ctx:          ctx,
		ephemeralDir: "",
	}, nil
}

// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
// recognizes _only_ public keys from the supplied blob, and returns the identities
// of these keys.
// The caller must call .Close() on the returned SigningMechanism.
func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
	dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-")
	if err != nil {
		return nil, nil, err
	}
	removeDir := true
	defer func() {
		if removeDir {
			os.RemoveAll(dir)
		}
	}()
	ctx, err := newGPGMEContext(dir)
	if err != nil {
		return nil, nil, err
	}
	mech := &gpgmeSigningMechanism{
		ctx:          ctx,
		ephemeralDir: dir,
	}
	keyIdentities, err := mech.importKeysFromBytes(blob)
	if err != nil {
		return nil, nil, err
	}

	removeDir = false
	return mech, keyIdentities, nil
}

// newGPGMEContext returns a new *gpgme.Context, using optionalDir if not empty.
func newGPGMEContext(optionalDir string) (*gpgme.Context, error) {
	ctx, err := gpgme.New()
	if err != nil {
		return nil, err
	}
	if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil {
		return nil, err
	}
	if optionalDir != "" {
		err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir)
		if err != nil {
			return nil, err
		}
	}
	ctx.SetArmor(false)
	ctx.SetTextMode(false)
	return ctx, nil
}

func (m *gpgmeSigningMechanism) Close() error {
	if m.ephemeralDir != "" {
		os.RemoveAll(m.ephemeralDir) // Ignore an error, if any
	}
	return nil
}

// importKeysFromBytes imports public keys from the supplied blob and returns their identities.
// The blob is assumed to have an appropriate format (the caller is expected to know which one).
// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism);
// but we do not make this public, it can only be used through newEphemeralGPGSigningMechanism.
func (m *gpgmeSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) {
	inputData, err := gpgme.NewDataBytes(blob)
	if err != nil {
		return nil, err
	}
	res, err := m.ctx.Import(inputData)
	if err != nil {
		return nil, err
	}
	keyIdentities := []string{}
	for _, i := range res.Imports {
		if i.Result == nil {
			keyIdentities = append(keyIdentities, i.Fingerprint)
		}
	}
	return keyIdentities, nil
}

// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError.
func (m *gpgmeSigningMechanism) SupportsSigning() error {
	return nil
}

// Sign creates a (non-detached) signature of input using keyIdentity.
// Fails with a SigningNotSupportedError if the mechanism does not support signing.
func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
	key, err := m.ctx.GetKey(keyIdentity, true)
	if err != nil {
		return nil, err
	}
	inputData, err := gpgme.NewDataBytes(input)
	if err != nil {
		return nil, err
	}
	var sigBuffer bytes.Buffer
	sigData, err := gpgme.NewDataWriter(&sigBuffer)
	if err != nil {
		return nil, err
	}
	if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
		return nil, err
	}
	return sigBuffer.Bytes(), nil
}

// Verify parses unverifiedSignature and returns the content and the signer's identity
func (m gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
	signedBuffer := bytes.Buffer{}
	signedData, err := gpgme.NewDataWriter(&signedBuffer)
	if err != nil {
		return nil, "", err
	}
	unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature)
	if err != nil {
		return nil, "", err
	}
	_, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData)
	if err != nil {
		return nil, "", err
	}
	if len(sigs) != 1 {
		return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))}
	}
	sig := sigs[0]
	// This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves
	if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage {
		// FIXME: Better error reporting eventually
		return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)}
	}
	return signedBuffer.Bytes(), sig.Fingerprint, nil
}

// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
// along with a short identifier of the key used for signing.
// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
// is NOT the same as a "key identity" used in other calls to this interface, and
// the values may have no recognizable relationship if the public key is not available.
func (m gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
	return gpgUntrustedSignatureContents(untrustedSignature)
}
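The ephemeral variant above imports keys into a throwaway GNUPGHOME that Close() removes, so verification against a caller-supplied key blob never touches the user's keyring. A hedged sketch of the intended use (assumes the imports from the previous sketch; pubKeyBlob and sig are placeholder inputs obtained elsewhere):

// verifyWithKeyBlob is a hypothetical helper: pubKeyBlob is an exported public
// key (binary or ASCII-armored), sig a signature blob, both obtained elsewhere.
func verifyWithKeyBlob(pubKeyBlob, sig []byte) ([]byte, error) {
	mech, keyIdentities, err := signature.NewEphemeralGPGSigningMechanism(pubKeyBlob)
	if err != nil {
		return nil, err
	}
	defer mech.Close() // also removes the temporary GPG home directory

	contents, keyIdentity, err := mech.Verify(sig)
	if err != nil {
		return nil, err
	}
	// Accept only signatures made by one of the keys we just imported.
	for _, id := range keyIdentities {
		if id == keyIdentity {
			return contents, nil
		}
	}
	return nil, fmt.Errorf("signature made by untrusted key %s", keyIdentity)
}
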
153 vendor/github.com/containers/image/signature/mechanism_openpgp.go generated vendored Normal file
@@ -0,0 +1,153 @@
// +build containers_image_openpgp

package signature

import (
	"bytes"
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"strings"
	"time"

	"github.com/containers/storage/pkg/homedir"
	"golang.org/x/crypto/openpgp"
)

// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp.
type openpgpSigningMechanism struct {
	keyring openpgp.EntityList
}

// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir if not empty.
// The caller must call .Close() on the returned SigningMechanism.
func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
	m := &openpgpSigningMechanism{
		keyring: openpgp.EntityList{},
	}

	gpgHome := optionalDir
	if gpgHome == "" {
		gpgHome = os.Getenv("GNUPGHOME")
		if gpgHome == "" {
			gpgHome = path.Join(homedir.Get(), ".gnupg")
		}
	}

	pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg"))
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}
	} else {
		_, err := m.importKeysFromBytes(pubring)
		if err != nil {
			return nil, err
		}
	}
	return m, nil
}

// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
// recognizes _only_ public keys from the supplied blob, and returns the identities
// of these keys.
// The caller must call .Close() on the returned SigningMechanism.
func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
	m := &openpgpSigningMechanism{
		keyring: openpgp.EntityList{},
	}
	keyIdentities, err := m.importKeysFromBytes(blob)
	if err != nil {
		return nil, nil, err
	}
	return m, keyIdentities, nil
}

func (m *openpgpSigningMechanism) Close() error {
	return nil
}

// importKeysFromBytes imports public keys from the supplied blob and returns their identities.
// The blob is assumed to have an appropriate format (the caller is expected to know which one).
func (m *openpgpSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) {
	keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob))
	if err != nil {
		k, e2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob))
		if e2 != nil {
			return nil, err // The original error -- FIXME: is this better?
		}
		keyring = k
	}

	keyIdentities := []string{}
	for _, entity := range keyring {
		if entity.PrimaryKey == nil {
			// Coverage: This should never happen, openpgp.ReadEntity fails with a
			// openpgp.errors.StructuralError instead of returning an entity with this
			// field set to nil.
			continue
		}
		// Uppercase the fingerprint to be compatible with gpgme
		keyIdentities = append(keyIdentities, strings.ToUpper(fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint)))
		m.keyring = append(m.keyring, entity)
	}
	return keyIdentities, nil
}

// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError.
func (m *openpgpSigningMechanism) SupportsSigning() error {
	return SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
}

// Sign creates a (non-detached) signature of input using keyIdentity.
// Fails with a SigningNotSupportedError if the mechanism does not support signing.
func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
	return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
}

// Verify parses unverifiedSignature and returns the content and the signer's identity
func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
	md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil)
	if err != nil {
		return nil, "", err
	}
	if !md.IsSigned {
		return nil, "", errors.New("not signed")
	}
	content, err := ioutil.ReadAll(md.UnverifiedBody)
	if err != nil {
		// Coverage: md.UnverifiedBody.Read only fails if the body is encrypted
		// (and possibly also signed, but it _must_ be encrypted) and the signing
		// “modification detection code” detects a mismatch. But in that case,
		// we would expect the signature verification to fail as well, and that is checked
		// first. Besides, we are not supplying any decryption keys, so we really
		// can never reach this “encrypted data MDC mismatch” path.
		return nil, "", err
	}
	if md.SignatureError != nil {
		return nil, "", fmt.Errorf("signature error: %v", md.SignatureError)
	}
	if md.SignedBy == nil {
		return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)}
	}
	if md.Signature.SigLifetimeSecs != nil {
		expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second)
		if time.Now().After(expiry) {
			return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)}
		}
	}

	// Uppercase the fingerprint to be compatible with gpgme
	return content, strings.ToUpper(fmt.Sprintf("%x", md.SignedBy.PublicKey.Fingerprint)), nil
}

// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
// along with a short identifier of the key used for signing.
// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
// is NOT the same as a "key identity" used in other calls to this interface, and
// the values may have no recognizable relationship if the public key is not available.
func (m openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
	return gpgUntrustedSignatureContents(untrustedSignature)
}
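Both build variants route UntrustedSignatureContents through gpgUntrustedSignatureContents, which parses the OpenPGP packet structure without any keys, so its output is suitable only for debugging or display, never for trust decisions. A sketch (sig is a placeholder blob; the fmt and log imports are assumed):

// inspectSignature is a hypothetical debugging helper; sig is a signature blob.
func inspectSignature(mech signature.SigningMechanism, sig []byte) {
	untrustedContents, shortKeyID, err := mech.UntrustedSignatureContents(sig)
	if err != nil {
		log.Fatal(err)
	}
	// shortKeyID is a 64-bit OpenPGP "Key ID", not the fingerprint-style key
	// identity returned by Verify; treat both values as unverified hints only.
	fmt.Printf("claimed key %s, payload %s\n", shortKeyID, untrustedContents)
}
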
104 vendor/github.com/containers/image/signature/policy_config.go generated vendored
@@ -15,7 +15,6 @@ package signature

import (
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"path/filepath"
@@ -23,6 +22,7 @@ import (
	"github.com/containers/image/docker/reference"
	"github.com/containers/image/transports"
	"github.com/containers/image/types"
	"github.com/pkg/errors"
)

// systemDefaultPolicyPath is the policy path used for DefaultPolicy().
@@ -41,8 +41,6 @@ func (err InvalidPolicyFormatError) Error() string {
	return string(err)
}

// FIXME: NewDefaultPolicy, from default file (or environment if trusted?)

// DefaultPolicy returns the default policy of the system.
// Most applications should be using this method to get the policy configured
// by the system administrator.
@@ -124,10 +122,8 @@ func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
	// So, use a temporary map of pointers-to-slices and convert.
	tmpMap := map[string]*PolicyTransportScopes{}
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		transport, ok := transports.KnownTransports[key]
		if !ok {
			return nil
		}
		// transport can be nil
		transport := transports.Get(key)
		// paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
		if _, ok := tmpMap[key]; ok {
			return nil
@@ -157,7 +153,7 @@ func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error {
}

// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes
// while validating using a specific ImageTransport.
// while validating using a specific ImageTransport if not nil.
type policyTransportScopesWithTransport struct {
	transport types.ImageTransport
	dest      *PolicyTransportScopes
@@ -176,7 +172,7 @@ func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error {
		if _, ok := tmpMap[key]; ok {
			return nil
		}
		if key != "" {
		if key != "" && m.transport != nil {
			if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil {
				return nil
			}
@@ -259,13 +255,8 @@ var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil)
func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
	*pr = prInsecureAcceptAnything{}
	var tmp prInsecureAcceptAnything
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		switch key {
		case "type":
			return &tmp.Type
		default:
			return nil
		}
	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
		"type": &tmp.Type,
	}); err != nil {
		return err
	}
@@ -294,13 +285,8 @@ var _ json.Unmarshaler = (*prReject)(nil)
func (pr *prReject) UnmarshalJSON(data []byte) error {
	*pr = prReject{}
	var tmp prReject
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		switch key {
		case "type":
			return &tmp.Type
		default:
			return nil
		}
	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
		"type": &tmp.Type,
	}); err != nil {
		return err
	}
@@ -407,7 +393,7 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
	case !gotKeyPath && !gotKeyData:
		return InvalidPolicyFormatError("At least one of keyPath and keyData must be specified")
	default: // Coverage: This should never happen
		return fmt.Errorf("Impossible keyPath/keyData presence combination!?")
		return errors.Errorf("Impossible keyPath/keyData presence combination!?")
	}
	if err != nil {
		return err
@@ -469,15 +455,9 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
	*pr = prSignedBaseLayer{}
	var tmp prSignedBaseLayer
	var baseLayerIdentity json.RawMessage
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		switch key {
		case "type":
			return &tmp.Type
		case "baseLayerIdentity":
			return &baseLayerIdentity
		default:
			return nil
		}
	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
		"type":              &tmp.Type,
		"baseLayerIdentity": &baseLayerIdentity,
	}); err != nil {
		return err
	}
@@ -485,9 +465,6 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
	if tmp.Type != prTypeSignedBaseLayer {
		return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
	}
	if baseLayerIdentity == nil {
		return InvalidPolicyFormatError(fmt.Sprintf("baseLayerIdentity not specified"))
	}
	bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
	if err != nil {
		return err
@@ -545,13 +522,8 @@ var _ json.Unmarshaler = (*prmMatchExact)(nil)
func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
	*prm = prmMatchExact{}
	var tmp prmMatchExact
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		switch key {
		case "type":
			return &tmp.Type
		default:
			return nil
		}
	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
		"type": &tmp.Type,
	}); err != nil {
		return err
	}
@@ -580,13 +552,8 @@ var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
	*prm = prmMatchRepoDigestOrExact{}
	var tmp prmMatchRepoDigestOrExact
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		switch key {
		case "type":
			return &tmp.Type
		default:
			return nil
		}
	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
		"type": &tmp.Type,
	}); err != nil {
		return err
	}
@@ -615,13 +582,8 @@ var _ json.Unmarshaler = (*prmMatchRepository)(nil)
func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
	*prm = prmMatchRepository{}
	var tmp prmMatchRepository
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		switch key {
		case "type":
			return &tmp.Type
		default:
			return nil
		}
	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
		"type": &tmp.Type,
	}); err != nil {
		return err
	}
@@ -635,7 +597,7 @@ func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {

// newPRMExactReference is NewPRMExactReference, except it returns the private type.
func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
	ref, err := reference.ParseNamed(dockerReference)
	ref, err := reference.ParseNormalizedNamed(dockerReference)
	if err != nil {
		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error()))
	}
@@ -660,15 +622,9 @@ var _ json.Unmarshaler = (*prmExactReference)(nil)
func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
	*prm = prmExactReference{}
	var tmp prmExactReference
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		switch key {
		case "type":
			return &tmp.Type
		case "dockerReference":
			return &tmp.DockerReference
		default:
			return nil
		}
	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
		"type":            &tmp.Type,
		"dockerReference": &tmp.DockerReference,
	}); err != nil {
		return err
	}
@@ -687,7 +643,7 @@ func (prm *prmExactReference) UnmarshalJSON(data []byte) error {

// newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
	if _, err := reference.ParseNamed(dockerRepository); err != nil {
	if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
		return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
	}
	return &prmExactRepository{
@@ -708,15 +664,9 @@ var _ json.Unmarshaler = (*prmExactRepository)(nil)
func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
	*prm = prmExactRepository{}
	var tmp prmExactRepository
	if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
		switch key {
		case "type":
			return &tmp.Type
		case "dockerRepository":
			return &tmp.DockerRepository
		default:
			return nil
		}
	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
		"type":             &tmp.Type,
		"dockerRepository": &tmp.DockerRepository,
	}); err != nil {
		return err
	}

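All of the stricter unmarshalers above ultimately parse policy.json. A hedged sketch of loading a small policy through signature.NewPolicyFromBytes, the public constructor as I understand this package's API (the registry scope is hypothetical; log and signature imports assumed):

policyJSON := []byte(`{
	"default": [{"type": "insecureAcceptAnything"}],
	"transports": {
		"docker": {
			"registry.example.com/banned": [{"type": "reject"}]
		}
	}
}`)
policy, err := signature.NewPolicyFromBytes(policyJSON)
if err != nil {
	// Unknown keys, duplicated keys, and wrong types are rejected here,
	// typically as an InvalidPolicyFormatError.
	log.Fatal(err)
}
_ = policy
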
5 vendor/github.com/containers/image/signature/policy_eval.go generated vendored
@@ -6,10 +6,9 @@
package signature

import (
	"fmt"

	"github.com/Sirupsen/logrus"
	"github.com/containers/image/types"
	"github.com/pkg/errors"
)

// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
@@ -95,7 +94,7 @@ const (
// changeState changes pc.state, or fails if the state is unexpected
func (pc *PolicyContext) changeState(expected, new policyContextState) error {
	if pc.state != expected {
		return fmt.Errorf(`"Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
		return errors.Errorf(`"Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
	}
	pc.state = new
	return nil

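The state machine that changeState guards backs the public PolicyContext lifecycle. A rough sketch of the intended usage (errors, signature, and types imports assumed; img stands for a types.UnparsedImage obtained elsewhere):

// checkImage is a sketch of the PolicyContext lifecycle.
func checkImage(img types.UnparsedImage) error {
	policy, err := signature.DefaultPolicy(nil) // nil: default *types.SystemContext
	if err != nil {
		return err
	}
	pc, err := signature.NewPolicyContext(policy)
	if err != nil {
		return err
	}
	defer pc.Destroy() // transitions the context to the destroyed state exactly once

	allowed, err := pc.IsRunningImageAllowed(img)
	if err != nil {
		return err
	}
	if !allowed {
		return errors.New("image rejected by policy")
	}
	return nil
}
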
25 vendor/github.com/containers/image/signature/policy_eval_signedby.go generated vendored
@@ -3,15 +3,15 @@
package signature

import (
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"strings"

	"github.com/pkg/errors"

	"github.com/containers/image/manifest"
	"github.com/containers/image/types"
	"github.com/docker/distribution/digest"
	"github.com/opencontainers/go-digest"
)

func (pr *prSignedBy) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
@@ -19,10 +19,10 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(image types.UnparsedImage, sig [
	case SBKeyTypeGPGKeys:
	case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
		// FIXME? Reject this at policy parsing time already?
		return sarRejected, nil, fmt.Errorf(`"Unimplemented "keyType" value "%s"`, string(pr.KeyType))
		return sarRejected, nil, errors.Errorf(`"Unimplemented "keyType" value "%s"`, string(pr.KeyType))
	default:
		// This should never happen, newPRSignedBy ensures KeyType.IsValid()
		return sarRejected, nil, fmt.Errorf(`"Unknown "keyType" value "%s"`, string(pr.KeyType))
		return sarRejected, nil, errors.Errorf(`"Unknown "keyType" value "%s"`, string(pr.KeyType))
	}

	if pr.KeyPath != "" && pr.KeyData != nil {
@@ -41,20 +41,11 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(image types.UnparsedImage, sig [
	}

	// FIXME: move this to per-context initialization
	dir, err := ioutil.TempDir("", "skopeo-signedBy-")
	if err != nil {
		return sarRejected, nil, err
	}
	defer os.RemoveAll(dir)
	mech, err := newGPGSigningMechanismInDirectory(dir)
	if err != nil {
		return sarRejected, nil, err
	}

	trustedIdentities, err := mech.ImportKeysFromBytes(data)
	mech, trustedIdentities, err := NewEphemeralGPGSigningMechanism(data)
	if err != nil {
		return sarRejected, nil, err
	}
	defer mech.Close()
	if len(trustedIdentities) == 0 {
		return sarRejected, nil, PolicyRequirementError("No public keys imported")
	}
@@ -116,7 +107,7 @@ func (pr *prSignedBy) isRunningImageAllowed(image types.UnparsedImage) (bool, er
			// Huh?! This should not happen at all; treat it as any other invalid value.
			fallthrough
		default:
			reason = fmt.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
			reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
		}
		rejections = append(rejections, reason)
	}

6 vendor/github.com/containers/image/signature/policy_reference_match.go generated vendored
@@ -17,7 +17,7 @@ func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (referen
		return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
			transports.ImageName(image.Reference())))
	}
	r2, err := reference.ParseNamed(s2)
	r2, err := reference.ParseNormalizedNamed(s2)
	if err != nil {
		return nil, nil, err
	}
@@ -69,11 +69,11 @@ func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage,

// parseDockerReferences converts two reference strings into parsed entities, failing on any error
func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) {
	r1, err := reference.ParseNamed(s1)
	r1, err := reference.ParseNormalizedNamed(s1)
	if err != nil {
		return nil, nil, err
	}
	r2, err := reference.ParseNamed(s2)
	r2, err := reference.ParseNormalizedNamed(s2)
	if err != nil {
		return nil, nil, err
	}

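The switch from ParseNamed to ParseNormalizedNamed means short references are expanded to their canonical form before matching. A small sketch of the difference, using the vendored reference package:

package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("busybox:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.String())                  // "docker.io/library/busybox:latest"
	fmt.Println(reference.FamiliarString(ref)) // "busybox:latest"
}
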
232 vendor/github.com/containers/image/signature/signature.go generated vendored
@@ -4,12 +4,13 @@ package signature

import (
	"encoding/json"
	"errors"
	"fmt"
	"time"

	"github.com/pkg/errors"

	"github.com/containers/image/version"
	"github.com/docker/distribution/digest"
	"github.com/opencontainers/go-digest"
)

const (
@@ -26,37 +27,74 @@ func (err InvalidSignatureError) Error() string {
}

// Signature is a parsed content of a signature.
// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below.
type Signature struct {
	DockerManifestDigest digest.Digest
	DockerReference      string // FIXME: more precise type?
}

// Wrap signature to add to it some methods which we don't want to make public.
type privateSignature struct {
	Signature
// untrustedSignature is a parsed content of a signature.
type untrustedSignature struct {
	UntrustedDockerManifestDigest digest.Digest
	UntrustedDockerReference      string // FIXME: more precise type?
	UntrustedCreatorID            *string
	// This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision,
	// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
	// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
	// we would add another field, UntrustedTimestampNS int64.
	UntrustedTimestamp *int64
}

// Compile-time check that privateSignature implements json.Marshaler
var _ json.Marshaler = (*privateSignature)(nil)
// UntrustedSignatureInformation is information available in an untrusted signature.
// This may be useful when debugging signature verification failures,
// or when managing a set of signatures on a single image.
//
// WARNING: Do not use the contents of this for ANY security decisions,
// and be VERY CAREFUL about showing this information to humans in any way which suggests that these values “are probably” reliable.
// There is NO REASON to expect the values to be correct, or not intentionally misleading
// (including things like “✅ Verified by $authority”)
type UntrustedSignatureInformation struct {
	UntrustedDockerManifestDigest digest.Digest
	UntrustedDockerReference      string // FIXME: more precise type?
	UntrustedCreatorID            *string
	UntrustedTimestamp            *time.Time
	UntrustedShortKeyIdentifier   string
}

// newUntrustedSignature returns an untrustedSignature object with
// the specified primary contents and appropriate metadata.
func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature {
	// Use intermediate variables for these values so that we can take their addresses.
	// Golang guarantees that they will have a new address on every execution.
	creatorID := "atomic " + version.Version
	timestamp := time.Now().Unix()
	return untrustedSignature{
		UntrustedDockerManifestDigest: dockerManifestDigest,
		UntrustedDockerReference:      dockerReference,
		UntrustedCreatorID:            &creatorID,
		UntrustedTimestamp:            &timestamp,
	}
}

// Compile-time check that untrustedSignature implements json.Marshaler
var _ json.Marshaler = (*untrustedSignature)(nil)

// MarshalJSON implements the json.Marshaler interface.
func (s privateSignature) MarshalJSON() ([]byte, error) {
	return s.marshalJSONWithVariables(time.Now().UTC().Unix(), "atomic "+version.Version)
}

// Implementation of MarshalJSON, with caller-chosen values of the variable items to help testing.
func (s privateSignature) marshalJSONWithVariables(timestamp int64, creatorID string) ([]byte, error) {
	if s.DockerManifestDigest == "" || s.DockerReference == "" {
func (s untrustedSignature) MarshalJSON() ([]byte, error) {
	if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
		return nil, errors.New("Unexpected empty signature content")
	}
	critical := map[string]interface{}{
		"type":     signatureType,
		"image":    map[string]string{"docker-manifest-digest": s.DockerManifestDigest.String()},
		"identity": map[string]string{"docker-reference": s.DockerReference},
		"image":    map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
		"identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
	}
	optional := map[string]interface{}{
		"creator":   creatorID,
		"timestamp": timestamp,
	optional := map[string]interface{}{}
	if s.UntrustedCreatorID != nil {
		optional["creator"] = *s.UntrustedCreatorID
	}
	if s.UntrustedTimestamp != nil {
		optional["timestamp"] = *s.UntrustedTimestamp
	}
	signature := map[string]interface{}{
		"critical": critical,
@@ -65,11 +103,11 @@ func (s privateSignature) marshalJSONWithVariables(timestamp int64, creatorID st
	return json.Marshal(signature)
}
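For orientation, the JSON payload MarshalJSON produces has the following shape; the digest, reference, and timestamp values below are made up, and the type string stands for the package's signatureType constant, whose value is defined outside this excerpt:

{
  "critical": {
    "type": "<signatureType>",
    "image": {"docker-manifest-digest": "sha256:<hex digest>"},
    "identity": {"docker-reference": "registry.example.com/myproject/mystream:latest"}
  },
  "optional": {
    "creator": "atomic <version>",
    "timestamp": 1490000000
  }
}
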
|
||||
// Compile-time check that privateSignature implements json.Unmarshaler
|
||||
var _ json.Unmarshaler = (*privateSignature)(nil)
|
||||
// Compile-time check that untrustedSignature implements json.Unmarshaler
|
||||
var _ json.Unmarshaler = (*untrustedSignature)(nil)
|
||||
|
||||
// UnmarshalJSON implements the json.Unmarshaler interface
|
||||
func (s *privateSignature) UnmarshalJSON(data []byte) error {
|
||||
func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
|
||||
err := s.strictUnmarshalJSON(data)
|
||||
if err != nil {
|
||||
if _, ok := err.(jsonFormatError); ok {
|
||||
@@ -81,72 +119,81 @@ func (s *privateSignature) UnmarshalJSON(data []byte) error {
|
||||
|
||||
// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type.
|
||||
// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError in a single place, the caller.
|
||||
func (s *privateSignature) strictUnmarshalJSON(data []byte) error {
|
||||
var untyped interface{}
|
||||
if err := json.Unmarshal(data, &untyped); err != nil {
|
||||
return err
|
||||
}
|
||||
o, ok := untyped.(map[string]interface{})
|
||||
if !ok {
|
||||
return InvalidSignatureError{msg: "Invalid signature format"}
|
||||
}
|
||||
if err := validateExactMapKeys(o, "critical", "optional"); err != nil {
|
||||
func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
|
||||
var critical, optional json.RawMessage
|
||||
if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
|
||||
"critical": &critical,
|
||||
"optional": &optional,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c, err := mapField(o, "critical")
|
||||
if err != nil {
|
||||
var creatorID string
|
||||
var timestamp float64
|
||||
var gotCreatorID, gotTimestamp = false, false
|
||||
if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} {
|
||||
switch key {
|
||||
case "creator":
|
||||
gotCreatorID = true
|
||||
return &creatorID
|
||||
case "timestamp":
|
||||
gotTimestamp = true
|
||||
return ×tamp
|
||||
default:
|
||||
var ignore interface{}
|
||||
return &ignore
|
||||
}
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := validateExactMapKeys(c, "type", "image", "identity"); err != nil {
|
||||
return err
|
||||
if gotCreatorID {
|
||||
s.UntrustedCreatorID = &creatorID
|
||||
}
|
||||
if gotTimestamp {
|
||||
intTimestamp := int64(timestamp)
|
||||
if float64(intTimestamp) != timestamp {
|
||||
return InvalidSignatureError{msg: "Field optional.timestamp is not is not an integer"}
|
||||
}
|
||||
s.UntrustedTimestamp = &intTimestamp
|
||||
}
|
||||
|
||||
optional, err := mapField(o, "optional")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_ = optional // We don't use anything from here for now.
|
||||
|
||||
t, err := stringField(c, "type")
|
||||
if err != nil {
|
||||
var t string
|
||||
var image, identity json.RawMessage
|
||||
if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
|
||||
"type": &t,
|
||||
"image": &image,
|
||||
"identity": &identity,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if t != signatureType {
|
||||
return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
|
||||
}
|
||||
|
||||
image, err := mapField(c, "image")
|
||||
if err != nil {
|
||||
var digestString string
|
||||
if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
|
||||
"docker-manifest-digest": &digestString,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := validateExactMapKeys(image, "docker-manifest-digest"); err != nil {
|
||||
return err
|
||||
}
|
||||
digestString, err := stringField(image, "docker-manifest-digest")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.DockerManifestDigest = digest.Digest(digestString)
|
||||
s.UntrustedDockerManifestDigest = digest.Digest(digestString)
|
||||
|
||||
identity, err := mapField(c, "identity")
|
||||
if err != nil {
|
||||
if err := paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
|
||||
"docker-reference": &s.UntrustedDockerReference,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := validateExactMapKeys(identity, "docker-reference"); err != nil {
|
||||
return err
|
||||
}
|
||||
reference, err := stringField(identity, "docker-reference")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.DockerReference = reference
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Sign formats the signature and returns a blob signed using mech and keyIdentity
|
||||
func (s privateSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
|
||||
// (If it seems surprising that this is a method on untrustedSignature, note that there
|
||||
// isn’t a good reason to think that a key used by the user is trusted by any component
|
||||
// of the system just because it is a private key — actually the presence of a private key
|
||||
// on the system increases the likelihood of an a successful attack on that private key
|
||||
// on that particular system.)
|
||||
func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
|
||||
json, err := json.Marshal(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -158,7 +205,7 @@ func (s privateSignature) sign(mech SigningMechanism, keyIdentity string) ([]byt
|
||||
// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
|
||||
// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
|
||||
// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
|
||||
// because all of the functions have the same type, so there is a risk of exchanging the functions;
|
||||
// because the functions have the same or similar types, so there is a risk of exchanging the functions;
|
||||
// named members of this struct are more explicit.
|
||||
type signatureAcceptanceRules struct {
|
||||
validateKeyIdentity func(string) error
|
||||
@@ -177,16 +224,59 @@ func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var unmatchedSignature privateSignature
|
||||
var unmatchedSignature untrustedSignature
|
||||
if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
|
||||
return nil, InvalidSignatureError{msg: err.Error()}
|
||||
}
|
||||
if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.DockerManifestDigest); err != nil {
|
||||
if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := rules.validateSignedDockerReference(unmatchedSignature.DockerReference); err != nil {
|
||||
if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
signature := unmatchedSignature.Signature // Policy OK.
|
||||
return &signature, nil
|
||||
// signatureAcceptanceRules have accepted this value.
|
||||
return &Signature{
|
||||
DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest,
|
||||
DockerReference: unmatchedSignature.UntrustedDockerReference,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature,
|
||||
// WITHOUT doing any cryptographic verification.
|
||||
// This may be useful when debugging signature verification failures,
|
||||
// or when managing a set of signatures on a single image.
|
||||
//
|
||||
// WARNING: Do not use the contents of this for ANY security decisions,
|
||||
// and be VERY CAREFUL about showing this information to humans in any way which suggest that these values “are probably” reliable.
|
||||
// There is NO REASON to expect the values to be correct, or not intentionally misleading
|
||||
// (including things like “✅ Verified by $authority”)
|
||||
func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
|
||||
// NOTE: This should eventualy do format autodetection.
|
||||
mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer mech.Close()
|
||||
|
||||
untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var untrustedDecodedContents untrustedSignature
|
||||
if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil {
|
||||
return nil, InvalidSignatureError{msg: err.Error()}
|
||||
}
|
||||
|
||||
var timestamp *time.Time // = nil
|
||||
if untrustedDecodedContents.UntrustedTimestamp != nil {
|
||||
ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0)
|
||||
timestamp = &ts
|
||||
}
|
||||
return &UntrustedSignatureInformation{
|
||||
UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest,
|
||||
UntrustedDockerReference: untrustedDecodedContents.UntrustedDockerReference,
|
||||
UntrustedCreatorID: untrustedDecodedContents.UntrustedCreatorID,
|
||||
UntrustedTimestamp: timestamp,
|
||||
UntrustedShortKeyIdentifier: shortKeyIdentifier,
|
||||
}, nil
|
||||
}
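
A minimal caller-side sketch of the function above (not part of the vendored diff; the import path matches the vendored package, and the file name is invented for illustration):

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/containers/image/signature"
)

func main() {
	// Read a raw signature blob, e.g. one written next to an image by a signing tool.
	blob, err := ioutil.ReadFile("image.signature") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	// Nothing is verified here; every returned field is untrusted by design.
	info, err := signature.GetUntrustedSignatureInformationWithoutVerifying(blob)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("claimed manifest digest:", info.UntrustedDockerManifestDigest)
	fmt.Println("claimed reference:", info.UntrustedDockerReference)
	fmt.Println("short key identifier:", info.UntrustedShortKeyIdentifier)
}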
588 vendor/github.com/containers/image/storage/storage_image.go (generated, vendored, new file)
@@ -0,0 +1,588 @@
package storage

import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"time"

"github.com/pkg/errors"

"github.com/Sirupsen/logrus"
"github.com/containers/image/image"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/ioutils"
"github.com/containers/storage/storage"
ddigest "github.com/opencontainers/go-digest"
)

var (
// ErrBlobDigestMismatch is returned when PutBlob() is given a blob
// with a digest-based name that doesn't match its contents.
ErrBlobDigestMismatch = errors.New("blob digest mismatch")
// ErrBlobSizeMismatch is returned when PutBlob() is given a blob
// with an expected size that doesn't match the reader.
ErrBlobSizeMismatch = errors.New("blob size mismatch")
// ErrNoManifestLists is returned when GetTargetManifest() is
// called.
ErrNoManifestLists = errors.New("manifest lists are not supported by this transport")
// ErrNoSuchImage is returned when we attempt to access an image which
// doesn't exist in the storage area.
ErrNoSuchImage = storage.ErrNotAnImage
)

type storageImageSource struct {
imageRef storageReference
Tag string `json:"tag,omitempty"`
Created time.Time `json:"created-time,omitempty"`
ID string `json:"id"`
BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle
Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs
LayerPosition map[ddigest.Digest]int `json:"-"` // Where we are in reading a blob's layers
SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice
}

type storageImageDestination struct {
imageRef storageReference
Tag string `json:"tag,omitempty"`
Created time.Time `json:"created-time,omitempty"`
ID string `json:"id"`
BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle
Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs
BlobData map[ddigest.Digest][]byte `json:"-"` // Map from names of blobs that aren't layers to contents, temporary
Manifest []byte `json:"-"` // Manifest contents, temporary
Signatures []byte `json:"-"` // Signature contents, temporary
SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice
}

type storageLayerMetadata struct {
Digest string `json:"digest,omitempty"`
Size int64 `json:"size"`
CompressedSize int64 `json:"compressed-size,omitempty"`
}

type storageImage struct {
types.Image
size int64
}

// newImageSource sets us up to read out an image, which needs to already exist.
func newImageSource(imageRef storageReference) (*storageImageSource, error) {
id := imageRef.resolveID()
if id == "" {
logrus.Errorf("no image matching reference %q found", imageRef.StringWithinTransport())
return nil, ErrNoSuchImage
}
img, err := imageRef.transport.store.GetImage(id)
if err != nil {
return nil, errors.Wrapf(err, "error reading image %q", id)
}
image := &storageImageSource{
imageRef: imageRef,
Created: time.Now(),
ID: img.ID,
BlobList: []types.BlobInfo{},
Layers: make(map[ddigest.Digest][]string),
LayerPosition: make(map[ddigest.Digest]int),
SignatureSizes: []int{},
}
if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
return nil, errors.Wrap(err, "error decoding metadata for source image")
}
return image, nil
}

// newImageDestination sets us up to write a new image.
func newImageDestination(imageRef storageReference) (*storageImageDestination, error) {
image := &storageImageDestination{
imageRef: imageRef,
Tag: imageRef.reference,
Created: time.Now(),
ID: imageRef.id,
BlobList: []types.BlobInfo{},
Layers: make(map[ddigest.Digest][]string),
BlobData: make(map[ddigest.Digest][]byte),
SignatureSizes: []int{},
}
return image, nil
}

func (s storageImageSource) Reference() types.ImageReference {
return s.imageRef
}

func (s storageImageDestination) Reference() types.ImageReference {
return s.imageRef
}

func (s storageImageSource) Close() error {
return nil
}

func (s storageImageDestination) Close() error {
return nil
}

func (s storageImageDestination) ShouldCompressLayers() bool {
// We ultimately have to decompress layers to populate trees on disk,
// so callers shouldn't bother compressing them before handing them to
// us, if they're not already compressed.
return false
}

// putBlob stores a layer or data blob, optionally enforcing that a digest in
// blobinfo matches the incoming data.
func (s *storageImageDestination) putBlob(stream io.Reader, blobinfo types.BlobInfo, enforceDigestAndSize bool) (types.BlobInfo, error) {
blobSize := blobinfo.Size
digest := blobinfo.Digest
errorBlobInfo := types.BlobInfo{
Digest: "",
Size: -1,
}
// Try to read an initial snippet of the blob.
header := make([]byte, 10240)
n, err := stream.Read(header)
if err != nil && err != io.EOF {
return errorBlobInfo, err
}
// Set up to read the whole blob (the initial snippet, plus the rest)
// while digesting it with either the default, or the passed-in digest,
// if one was specified.
hasher := ddigest.Canonical.Digester()
if digest.Validate() == nil {
if a := digest.Algorithm(); a.Available() {
hasher = a.Digester()
}
}
hash := ""
counter := ioutils.NewWriteCounter(hasher.Hash())
defragmented := io.MultiReader(bytes.NewBuffer(header[:n]), stream)
multi := io.TeeReader(defragmented, counter)
if (n > 0) && archive.IsArchive(header[:n]) {
// It's a filesystem layer. If it's not the first one in the
// image, we assume that the most recently added layer is its
// parent.
parentLayer := ""
for _, blob := range s.BlobList {
if layerList, ok := s.Layers[blob.Digest]; ok {
parentLayer = layerList[len(layerList)-1]
}
}
// If we have an expected content digest, generate a layer ID
// based on the parent's ID and the expected content digest.
id := ""
if digest.Validate() == nil {
id = ddigest.Canonical.FromBytes([]byte(parentLayer + "+" + digest.String())).Hex()
}
// Attempt to create the identified layer and import its contents.
layer, uncompressedSize, err := s.imageRef.transport.store.PutLayer(id, parentLayer, nil, "", true, multi)
if err != nil && err != storage.ErrDuplicateID {
logrus.Debugf("error importing layer blob %q as %q: %v", blobinfo.Digest, id, err)
return errorBlobInfo, err
}
if err == storage.ErrDuplicateID {
// We specified an ID, and there's already a layer with
// the same ID. Drain the input so that we can look at
// its length and digest.
_, err := io.Copy(ioutil.Discard, multi)
if err != nil && err != io.EOF {
logrus.Debugf("error digesting layer blob %q as %q: %v", blobinfo.Digest, id, err)
return errorBlobInfo, err
}
hash = hasher.Digest().String()
} else {
// Applied the layer with the specified ID. Note the
// size info and computed digest.
hash = hasher.Digest().String()
layerMeta := storageLayerMetadata{
Digest: hash,
CompressedSize: counter.Count,
Size: uncompressedSize,
}
if metadata, err := json.Marshal(&layerMeta); len(metadata) != 0 && err == nil {
s.imageRef.transport.store.SetMetadata(layer.ID, string(metadata))
}
// Hang on to the new layer's ID.
id = layer.ID
}
// Check if the size looks right.
if enforceDigestAndSize && blobinfo.Size >= 0 && blobinfo.Size != counter.Count {
logrus.Debugf("layer blob %q size is %d, not %d, rejecting", blobinfo.Digest, counter.Count, blobinfo.Size)
if layer != nil {
// Something's wrong; delete the newly-created layer.
s.imageRef.transport.store.DeleteLayer(layer.ID)
}
return errorBlobInfo, ErrBlobSizeMismatch
}
// If the content digest was specified, verify it.
if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash {
logrus.Debugf("layer blob %q digests to %q, rejecting", blobinfo.Digest, hash)
if layer != nil {
// Something's wrong; delete the newly-created layer.
s.imageRef.transport.store.DeleteLayer(layer.ID)
}
return errorBlobInfo, ErrBlobDigestMismatch
}
// If we didn't get a blob size, return the one we calculated.
if blobSize == -1 {
blobSize = counter.Count
}
// If we didn't get a digest, construct one.
if digest == "" {
digest = ddigest.Digest(hash)
}
// Record that this layer blob is a layer, and the layer ID it
// ended up having. This is a list, in case the same blob is
// being applied more than once.
s.Layers[digest] = append(s.Layers[digest], id)
s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: counter.Count})
if layer != nil {
logrus.Debugf("blob %q imported as a filesystem layer %q", blobinfo.Digest, id)
} else {
logrus.Debugf("layer blob %q already present as layer %q", blobinfo.Digest, id)
}
} else {
// It's just data. Finish scanning it in, check that our
// computed digest matches the passed-in digest, and store it,
// but leave it out of the blob-to-layer-ID map so that we can
// tell that it's not a layer.
blob, err := ioutil.ReadAll(multi)
if err != nil && err != io.EOF {
return errorBlobInfo, err
}
hash = hasher.Digest().String()
if enforceDigestAndSize && blobinfo.Size >= 0 && int64(len(blob)) != blobinfo.Size {
logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, int64(len(blob)), blobinfo.Size)
return errorBlobInfo, ErrBlobSizeMismatch
}
// If we were given a digest, verify that the content matches
// it.
if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash {
logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash)
return errorBlobInfo, ErrBlobDigestMismatch
}
// If we didn't get a blob size, return the one we calculated.
if blobSize == -1 {
blobSize = int64(len(blob))
}
// If we didn't get a digest, construct one.
if digest == "" {
digest = ddigest.Digest(hash)
}
// Save the blob for when we Commit().
s.BlobData[digest] = blob
s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: int64(len(blob))})
logrus.Debugf("blob %q imported as opaque data %q", blobinfo.Digest, digest)
}
return types.BlobInfo{
Digest: digest,
Size: blobSize,
}, nil
}
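
The deterministic layer-ID scheme used above (a canonical digest of the parent layer ID joined with the expected diff digest) can be reproduced in isolation. A minimal sketch with invented inputs, using the same go-digest calls as the vendored code:

package main

import (
	"fmt"

	ddigest "github.com/opencontainers/go-digest"
)

func main() {
	parentLayer := "" // the first layer in an image has no parent
	// Stand-in for the expected content digest of the layer diff.
	blobDigest := ddigest.Canonical.FromString("example layer contents")
	// Same derivation as putBlob: stable for a given (parent, digest) pair,
	// so re-applying the same blob onto the same parent reuses the layer ID.
	id := ddigest.Canonical.FromBytes([]byte(parentLayer + "+" + blobDigest.String())).Hex()
	fmt.Println(id)
}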

// PutBlob is used to both store filesystem layers and binary data that is part
// of the image. Filesystem layers are assumed to be imported in order, as
// that is required by some of the underlying storage drivers.
func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) {
return s.putBlob(stream, blobinfo, true)
}

// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, error) {
if blobinfo.Digest == "" {
return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`)
}
for _, blob := range s.BlobList {
if blob.Digest == blobinfo.Digest {
return true, blob.Size, nil
}
}
return false, -1, nil
}

func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) {
err := blobinfo.Digest.Validate()
if err != nil {
return types.BlobInfo{}, err
}
if layerList, ok := s.Layers[blobinfo.Digest]; !ok || len(layerList) < 1 {
b, err := s.imageRef.transport.store.GetImageBigData(s.ID, blobinfo.Digest.String())
if err != nil {
return types.BlobInfo{}, err
}
return types.BlobInfo{Digest: blobinfo.Digest, Size: int64(len(b))}, nil
}
layerList := s.Layers[blobinfo.Digest]
rc, _, err := diffLayer(s.imageRef.transport.store, layerList[len(layerList)-1])
if err != nil {
return types.BlobInfo{}, err
}
return s.putBlob(rc, blobinfo, false)
}

func (s *storageImageDestination) Commit() error {
// Create the image record.
lastLayer := ""
for _, blob := range s.BlobList {
if layerList, ok := s.Layers[blob.Digest]; ok {
lastLayer = layerList[len(layerList)-1]
}
}
img, err := s.imageRef.transport.store.CreateImage(s.ID, nil, lastLayer, "", nil)
if err != nil {
logrus.Debugf("error creating image: %q", err)
return err
}
logrus.Debugf("created new image ID %q", img.ID)
s.ID = img.ID
if s.Tag != "" {
// We have a name to set, so move the name to this image.
if err := s.imageRef.transport.store.SetNames(img.ID, []string{s.Tag}); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
logrus.Debugf("error setting names on image %q: %v", img.ID, err)
return err
}
logrus.Debugf("set name of image %q to %q", img.ID, s.Tag)
}
// Save the data blobs to disk, and drop their contents from memory.
keys := []ddigest.Digest{}
for k, v := range s.BlobData {
if err := s.imageRef.transport.store.SetImageBigData(img.ID, k.String(), v); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
logrus.Debugf("error saving big data %q for image %q: %v", k, img.ID, err)
return err
}
keys = append(keys, k)
}
for _, key := range keys {
delete(s.BlobData, key)
}
// Save the manifest, if we have one.
if err := s.imageRef.transport.store.SetImageBigData(s.ID, "manifest", s.Manifest); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
return err
}
// Save the signatures, if we have any.
if err := s.imageRef.transport.store.SetImageBigData(s.ID, "signatures", s.Signatures); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
return err
}
// Save our metadata.
metadata, err := json.Marshal(s)
if err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err)
return err
}
if len(metadata) != 0 {
if err = s.imageRef.transport.store.SetMetadata(s.ID, string(metadata)); err != nil {
if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil {
logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2)
}
logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
return err
}
logrus.Debugf("saved image metadata %q", string(metadata))
}
return nil
}

func (s *storageImageDestination) SupportedManifestMIMETypes() []string {
return nil
}

func (s *storageImageDestination) PutManifest(manifest []byte) error {
s.Manifest = make([]byte, len(manifest))
copy(s.Manifest, manifest)
return nil
}

// SupportsSignatures returns an error if we can't expect GetSignatures() to
// return data that was previously supplied to PutSignatures().
func (s *storageImageDestination) SupportsSignatures() error {
return nil
}

// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
// uploaded to the image destination, true otherwise.
func (s *storageImageDestination) AcceptsForeignLayerURLs() bool {
return false
}

func (s *storageImageDestination) PutSignatures(signatures [][]byte) error {
sizes := []int{}
sigblob := []byte{}
for _, sig := range signatures {
sizes = append(sizes, len(sig))
newblob := make([]byte, len(sigblob)+len(sig))
copy(newblob, sigblob)
copy(newblob[len(sigblob):], sig)
sigblob = newblob
}
s.Signatures = sigblob
s.SignatureSizes = sizes
return nil
}

func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {
rc, n, _, err = s.getBlobAndLayerID(info)
return rc, n, err
}

func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) {
err = info.Digest.Validate()
if err != nil {
return nil, -1, "", err
}
if layerList, ok := s.Layers[info.Digest]; !ok || len(layerList) < 1 {
b, err := s.imageRef.transport.store.GetImageBigData(s.ID, info.Digest.String())
if err != nil {
return nil, -1, "", err
}
r := bytes.NewReader(b)
logrus.Debugf("exporting opaque data as blob %q", info.Digest.String())
return ioutil.NopCloser(r), int64(r.Len()), "", nil
}
// If the blob was "put" more than once, we have multiple layer IDs
// which should all produce the same diff. For the sake of tests that
// want to make sure we created different layers each time the blob was
// "put", though, cycle through the layers.
layerList := s.Layers[info.Digest]
position, ok := s.LayerPosition[info.Digest]
if !ok {
position = 0
}
s.LayerPosition[info.Digest] = (position + 1) % len(layerList)
logrus.Debugf("exporting filesystem layer %q for blob %q", layerList[position], info.Digest)
rc, n, err = diffLayer(s.imageRef.transport.store, layerList[position])
return rc, n, layerList[position], err
}

func diffLayer(store storage.Store, layerID string) (rc io.ReadCloser, n int64, err error) {
layer, err := store.GetLayer(layerID)
if err != nil {
return nil, -1, err
}
layerMeta := storageLayerMetadata{
CompressedSize: -1,
}
if layer.Metadata != "" {
if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
return nil, -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID)
}
}
if layerMeta.CompressedSize <= 0 {
n = -1
} else {
n = layerMeta.CompressedSize
}
diff, err := store.Diff("", layer.ID)
if err != nil {
return nil, -1, err
}
return diff, n, nil
}

func (s *storageImageSource) GetManifest() (manifestBlob []byte, MIMEType string, err error) {
manifestBlob, err = s.imageRef.transport.store.GetImageBigData(s.ID, "manifest")
return manifestBlob, manifest.GuessMIMEType(manifestBlob), err
}

func (s *storageImageSource) GetTargetManifest(digest ddigest.Digest) (manifestBlob []byte, MIMEType string, err error) {
return nil, "", ErrNoManifestLists
}

func (s *storageImageSource) GetSignatures() (signatures [][]byte, err error) {
var offset int
signature, err := s.imageRef.transport.store.GetImageBigData(s.ID, "signatures")
if err != nil {
return nil, err
}
sigslice := [][]byte{}
for _, length := range s.SignatureSizes {
sigslice = append(sigslice, signature[offset:offset+length])
offset += length
}
if offset != len(signature) {
return nil, errors.Errorf("signatures data contained %d extra bytes", len(signature)-offset)
}
return sigslice, nil
}
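
The size-prefixed concatenation that PutSignatures writes and GetSignatures splits back apart is easy to get subtly wrong, so here is a self-contained sketch of the same round trip (helper names invented for this example):

package main

import "fmt"

// joinSignatures mirrors PutSignatures above: concatenate the blobs and
// record each blob's length.
func joinSignatures(signatures [][]byte) (blob []byte, sizes []int) {
	for _, sig := range signatures {
		sizes = append(sizes, len(sig))
		blob = append(blob, sig...)
	}
	return blob, sizes
}

// splitSignatures mirrors GetSignatures above: slice the concatenated blob
// back apart using the recorded sizes, and insist that nothing is left over.
func splitSignatures(blob []byte, sizes []int) ([][]byte, error) {
	sigs := [][]byte{}
	offset := 0
	for _, length := range sizes {
		sigs = append(sigs, blob[offset:offset+length])
		offset += length
	}
	if offset != len(blob) {
		return nil, fmt.Errorf("signatures data contained %d extra bytes", len(blob)-offset)
	}
	return sigs, nil
}

func main() {
	blob, sizes := joinSignatures([][]byte{[]byte("first signature"), []byte("second")})
	sigs, err := splitSignatures(blob, sizes)
	if err != nil {
		panic(err)
	}
	for _, sig := range sigs {
		fmt.Println(string(sig))
	}
}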

func (s *storageImageSource) getSize() (int64, error) {
var sum int64
names, err := s.imageRef.transport.store.ListImageBigData(s.imageRef.id)
if err != nil {
return -1, errors.Wrapf(err, "error reading image %q", s.imageRef.id)
}
for _, name := range names {
bigSize, err := s.imageRef.transport.store.GetImageBigDataSize(s.imageRef.id, name)
if err != nil {
return -1, errors.Wrapf(err, "error reading data blob size %q for %q", name, s.imageRef.id)
}
sum += bigSize
}
for _, sigSize := range s.SignatureSizes {
sum += int64(sigSize)
}
for _, layerList := range s.Layers {
for _, layerID := range layerList {
layer, err := s.imageRef.transport.store.GetLayer(layerID)
if err != nil {
return -1, err
}
layerMeta := storageLayerMetadata{
Size: -1,
}
if layer.Metadata != "" {
if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil {
return -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID)
}
}
if layerMeta.Size < 0 {
return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID)
}
sum += layerMeta.Size
}
}
return sum, nil
}

func (s *storageImage) Size() (int64, error) {
return s.size, nil
}

// newImage creates an image that also knows its size
func newImage(s storageReference) (types.Image, error) {
src, err := newImageSource(s)
if err != nil {
return nil, err
}
img, err := image.FromSource(src)
if err != nil {
return nil, err
}
size, err := src.getSize()
if err != nil {
return nil, err
}
return &storageImage{Image: img, size: size}, nil
}
127 vendor/github.com/containers/image/storage/storage_reference.go (generated, vendored, new file)
@@ -0,0 +1,127 @@
package storage

import (
"strings"

"github.com/Sirupsen/logrus"
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
)

// A storageReference holds an arbitrary name and/or an ID, which is a 32-byte
// value hex-encoded into a 64-character string, and a reference to a Store
// where an image is, or would be, kept.
type storageReference struct {
transport storageTransport
reference string
id string
name reference.Named
}

func newReference(transport storageTransport, reference, id string, name reference.Named) *storageReference {
// We take a copy of the transport, which contains a pointer to the
// store that it used for resolving this reference, so that the
// transport that we'll return from Transport() won't be affected by
// further calls to the original transport's SetStore() method.
return &storageReference{
transport: transport,
reference: reference,
id: id,
name: name,
}
}

// Resolve the reference's name to an image ID in the store, if there's already
// one present with the same name or ID.
func (s *storageReference) resolveID() string {
if s.id == "" {
image, err := s.transport.store.GetImage(s.reference)
if image != nil && err == nil {
s.id = image.ID
}
}
return s.id
}

// Return a Transport object that defaults to using the same store that we used
// to build this reference object.
func (s storageReference) Transport() types.ImageTransport {
return &storageTransport{
store: s.transport.store,
}
}

// Return a name with a tag, if we have a name to base it on.
func (s storageReference) DockerReference() reference.Named {
return s.name
}

// Return a name with a tag, prefixed with the graph root and driver name, to
// disambiguate between images which may be present in multiple stores and
// share only their names.
func (s storageReference) StringWithinTransport() string {
storeSpec := "[" + s.transport.store.GetGraphDriverName() + "@" + s.transport.store.GetGraphRoot() + "]"
if s.name == nil {
return storeSpec + "@" + s.id
}
if s.id == "" {
return storeSpec + s.reference
}
return storeSpec + s.reference + "@" + s.id
}
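
For concreteness, the three branches above produce strings shaped like the following (driver, path, name, and ID are invented for illustration):

[overlay@/var/lib/containers/storage]@2d4f87e2f5e8...   (ID only)
[overlay@/var/lib/containers/storage]docker.io/library/busybox:latest   (name only)
[overlay@/var/lib/containers/storage]docker.io/library/busybox:latest@2d4f87e2f5e8...   (name and ID)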

func (s storageReference) PolicyConfigurationIdentity() string {
return s.StringWithinTransport()
}

// Also accept policy that's tied to the combination of the graph root and
// driver name, to apply to all images stored in the Store, and to just the
// graph root, in case we're using multiple drivers in the same directory for
// some reason.
func (s storageReference) PolicyConfigurationNamespaces() []string {
storeSpec := "[" + s.transport.store.GetGraphDriverName() + "@" + s.transport.store.GetGraphRoot() + "]"
driverlessStoreSpec := "[" + s.transport.store.GetGraphRoot() + "]"
namespaces := []string{}
if s.name != nil {
if s.id != "" {
// The reference without the ID is also a valid namespace.
namespaces = append(namespaces, storeSpec+s.reference)
}
components := strings.Split(s.name.Name(), "/")
for len(components) > 0 {
namespaces = append(namespaces, storeSpec+strings.Join(components, "/"))
components = components[:len(components)-1]
}
}
namespaces = append(namespaces, storeSpec)
namespaces = append(namespaces, driverlessStoreSpec)
return namespaces
}
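
As an illustration (values invented), a reference such as [overlay@/var/lib/containers/storage]docker.io/library/busybox:latest@<id> would yield namespaces in decreasing order of specificity:

[overlay@/var/lib/containers/storage]docker.io/library/busybox:latest
[overlay@/var/lib/containers/storage]docker.io/library/busybox
[overlay@/var/lib/containers/storage]docker.io/library
[overlay@/var/lib/containers/storage]docker.io
[overlay@/var/lib/containers/storage]
[/var/lib/containers/storage]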

func (s storageReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
return newImage(s)
}

func (s storageReference) DeleteImage(ctx *types.SystemContext) error {
id := s.resolveID()
if id == "" {
logrus.Errorf("reference %q does not resolve to an image ID", s.StringWithinTransport())
return ErrNoSuchImage
}
layers, err := s.transport.store.DeleteImage(id, true)
if err == nil {
logrus.Debugf("deleted image %q", id)
for _, layer := range layers {
logrus.Debugf("deleted layer %q", layer)
}
}
return err
}

func (s storageReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
return newImageSource(s)
}

func (s storageReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
return newImageDestination(s)
}
291 vendor/github.com/containers/image/storage/storage_transport.go (generated, vendored, new file)
@@ -0,0 +1,291 @@
package storage

import (
"path/filepath"
"regexp"
"strings"

"github.com/pkg/errors"

"github.com/Sirupsen/logrus"
"github.com/containers/image/docker/reference"
"github.com/containers/image/transports"
"github.com/containers/image/types"
"github.com/containers/storage/storage"
"github.com/opencontainers/go-digest"
ddigest "github.com/opencontainers/go-digest"
)

func init() {
transports.Register(Transport)
}

var (
// Transport is an ImageTransport that uses either a default
// storage.Store or one that it's explicitly told to use.
Transport StoreTransport = &storageTransport{}
// ErrInvalidReference is returned when ParseReference() is passed an
// empty reference.
ErrInvalidReference = errors.New("invalid reference")
// ErrPathNotAbsolute is returned when a graph root is not an absolute
// path name.
ErrPathNotAbsolute = errors.New("path name is not absolute")
idRegexp = regexp.MustCompile("^(sha256:)?([0-9a-fA-F]{64})$")
)

// StoreTransport is an ImageTransport that uses a storage.Store to parse
// references, either its own default or one that it's told to use.
type StoreTransport interface {
types.ImageTransport
// SetStore sets the default store for this transport.
SetStore(storage.Store)
// GetImage retrieves the image from the transport's store that's named
// by the reference.
GetImage(types.ImageReference) (*storage.Image, error)
// GetStoreImage retrieves the image from a specified store that's named
// by the reference.
GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error)
// ParseStoreReference parses a reference, overriding any store
// specification that it may contain.
ParseStoreReference(store storage.Store, reference string) (*storageReference, error)
}

type storageTransport struct {
store storage.Store
}

func (s *storageTransport) Name() string {
// Still haven't really settled on a name.
return "containers-storage"
}

// SetStore sets the Store object which the Transport will use for parsing
// references when information about a Store is not directly specified as part
// of the reference. If one is not set, the library will attempt to initialize
// one with default settings when a reference needs to be parsed. Calling
// SetStore does not affect previously parsed references.
func (s *storageTransport) SetStore(store storage.Store) {
s.store = store
}

// ParseStoreReference takes a name or an ID, tries to figure out which it is
// relative to the given store, and returns it in a reference object.
func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) {
var name reference.Named
var sum digest.Digest
var err error
if ref == "" {
return nil, ErrInvalidReference
}
if ref[0] == '[' {
// Ignore the store specifier.
closeIndex := strings.IndexRune(ref, ']')
if closeIndex < 1 {
return nil, ErrInvalidReference
}
ref = ref[closeIndex+1:]
}
refInfo := strings.SplitN(ref, "@", 2)
if len(refInfo) == 1 {
// A name.
name, err = reference.ParseNormalizedNamed(refInfo[0])
if err != nil {
return nil, err
}
} else if len(refInfo) == 2 {
// An ID, possibly preceded by a name.
if refInfo[0] != "" {
name, err = reference.ParseNormalizedNamed(refInfo[0])
if err != nil {
return nil, err
}
}
sum, err = digest.Parse("sha256:" + refInfo[1])
if err != nil {
return nil, err
}
} else { // Coverage: len(refInfo) is always 1 or 2
// Anything else: store specified in a form we don't
// recognize.
return nil, ErrInvalidReference
}
storeSpec := "[" + store.GetGraphDriverName() + "@" + store.GetGraphRoot() + "]"
id := ""
if sum.Validate() == nil {
id = sum.Hex()
}
refname := ""
if name != nil {
name = reference.TagNameOnly(name)
refname = verboseName(name)
}
if refname == "" {
logrus.Debugf("parsed reference into %q", storeSpec+"@"+id)
} else if id == "" {
logrus.Debugf("parsed reference into %q", storeSpec+refname)
} else {
logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id)
}
return newReference(storageTransport{store: store}, refname, id, name), nil
}

func (s *storageTransport) GetStore() (storage.Store, error) {
// Return the transport's previously-set store. If we don't have one
// of those, initialize one now.
if s.store == nil {
store, err := storage.GetStore(storage.DefaultStoreOptions)
if err != nil {
return nil, err
}
s.store = store
}
return s.store, nil
}

// ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"),
// possibly prefixed with a store specifier in the form "[_graphroot_]" or
// "[_driver_@_graphroot_]", tries to figure out which it is, and returns it in
// a reference object. If the _graphroot_ is a location other than the default,
// it needs to have been previously opened using storage.GetStore(), so that it
// can figure out which run root goes with the graph root.
func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) {
store, err := s.GetStore()
if err != nil {
return nil, err
}
// Check if there's a store location prefix. If there is, then it
// needs to match a store that was previously initialized using
// storage.GetStore(), or be enough to let the storage library fill out
// the rest using knowledge that it has from elsewhere.
if reference[0] == '[' {
closeIndex := strings.IndexRune(reference, ']')
if closeIndex < 1 {
return nil, ErrInvalidReference
}
storeSpec := reference[1:closeIndex]
reference = reference[closeIndex+1:]
storeInfo := strings.SplitN(storeSpec, "@", 2)
if len(storeInfo) == 1 && storeInfo[0] != "" {
// One component: the graph root.
if !filepath.IsAbs(storeInfo[0]) {
return nil, ErrPathNotAbsolute
}
store2, err := storage.GetStore(storage.StoreOptions{
GraphRoot: storeInfo[0],
})
if err != nil {
return nil, err
}
store = store2
} else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" {
// Two components: the driver type and the graph root.
if !filepath.IsAbs(storeInfo[1]) {
return nil, ErrPathNotAbsolute
}
store2, err := storage.GetStore(storage.StoreOptions{
GraphDriverName: storeInfo[0],
GraphRoot: storeInfo[1],
})
if err != nil {
return nil, err
}
store = store2
} else {
// Anything else: store specified in a form we don't
// recognize.
return nil, ErrInvalidReference
}
}
return s.ParseStoreReference(store, reference)
}

func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) {
dref := ref.DockerReference()
if dref == nil {
if sref, ok := ref.(*storageReference); ok {
if sref.id != "" {
if img, err := store.GetImage(sref.id); err == nil {
return img, nil
}
}
}
return nil, ErrInvalidReference
}
return store.GetImage(verboseName(dref))
}

func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) {
store, err := s.GetStore()
if err != nil {
return nil, err
}
return s.GetStoreImage(store, ref)
}

func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error {
// Check that there's a store location prefix. Values we're passed are
// expected to come from PolicyConfigurationIdentity or
// PolicyConfigurationNamespaces, so if there's no store location,
// something's wrong.
if scope[0] != '[' {
return ErrInvalidReference
}
// Parse the store location prefix.
closeIndex := strings.IndexRune(scope, ']')
if closeIndex < 1 {
return ErrInvalidReference
}
storeSpec := scope[1:closeIndex]
scope = scope[closeIndex+1:]
storeInfo := strings.SplitN(storeSpec, "@", 2)
if len(storeInfo) == 1 && storeInfo[0] != "" {
// One component: the graph root.
if !filepath.IsAbs(storeInfo[0]) {
return ErrPathNotAbsolute
}
} else if len(storeInfo) == 2 && storeInfo[0] != "" && storeInfo[1] != "" {
// Two components: the driver type and the graph root.
if !filepath.IsAbs(storeInfo[1]) {
return ErrPathNotAbsolute
}
} else {
// Anything else: store specified in a form we don't
// recognize.
return ErrInvalidReference
}
// That might be all of it, and that's okay.
if scope == "" {
return nil
}
// But if there is anything left, it has to be a name, with or without
// a tag, with or without an ID, since we don't return namespace values
// that are just bare IDs.
scopeInfo := strings.SplitN(scope, "@", 2)
if len(scopeInfo) == 1 && scopeInfo[0] != "" {
_, err := reference.ParseNormalizedNamed(scopeInfo[0])
if err != nil {
return err
}
} else if len(scopeInfo) == 2 && scopeInfo[0] != "" && scopeInfo[1] != "" {
_, err := reference.ParseNormalizedNamed(scopeInfo[0])
if err != nil {
return err
}
_, err = ddigest.Parse("sha256:" + scopeInfo[1])
if err != nil {
return err
}
} else {
return ErrInvalidReference
}
return nil
}

func verboseName(name reference.Named) string {
name = reference.TagNameOnly(name)
tag := ""
if tagged, ok := name.(reference.NamedTagged); ok {
tag = tagged.Tag()
}
return name.Name() + ":" + tag
}
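
A small standalone sketch of the normalization verboseName relies on, using only calls already used in the vendored code (the input value is chosen for illustration):

package main

import (
	"fmt"

	"github.com/containers/image/docker/reference"
)

func main() {
	// A short name like "busybox" normalizes to its fully-qualified form,
	// and TagNameOnly fills in the default "latest" tag.
	name, err := reference.ParseNormalizedNamed("busybox")
	if err != nil {
		panic(err)
	}
	name = reference.TagNameOnly(name)
	tag := ""
	if tagged, ok := name.(reference.NamedTagged); ok {
		tag = tagged.Tag()
	}
	fmt.Println(name.Name() + ":" + tag) // docker.io/library/busybox:latest
}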

32 vendor/github.com/containers/image/transports/alltransports/alltransports.go (generated, vendored, new file)
@@ -0,0 +1,32 @@
package alltransports

import (
"strings"

// register all known transports
// NOTE: Make sure docs/policy.json.md is updated when adding or updating
// a transport.
_ "github.com/containers/image/directory"
_ "github.com/containers/image/docker"
_ "github.com/containers/image/docker/archive"
_ "github.com/containers/image/docker/daemon"
_ "github.com/containers/image/oci/layout"
_ "github.com/containers/image/openshift"
_ "github.com/containers/image/storage"
"github.com/containers/image/transports"
"github.com/containers/image/types"
"github.com/pkg/errors"
)

// ParseImageName converts a URL-like image name to a types.ImageReference.
func ParseImageName(imgName string) (types.ImageReference, error) {
parts := strings.SplitN(imgName, ":", 2)
if len(parts) != 2 {
return nil, errors.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
}
transport := transports.Get(parts[0])
if transport == nil {
return nil, errors.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0])
}
return transport.ParseReference(parts[1])
}
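
A usage sketch (assuming the import path shown; the image name is an example):

package main

import (
	"fmt"
	"log"

	"github.com/containers/image/transports/alltransports"
)

func main() {
	// Everything before the first colon names the transport ("docker"),
	// and the rest is handed to that transport's ParseReference.
	ref, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ref.Transport().Name())      // docker
	fmt.Println(ref.StringWithinTransport()) // e.g. //docker.io/library/busybox:latest
}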

80 vendor/github.com/containers/image/transports/transports.go (generated, vendored)
@@ -2,49 +2,61 @@ package transports

import (
"fmt"
"strings"
"sync"

"github.com/containers/image/directory"
"github.com/containers/image/docker"
"github.com/containers/image/docker/daemon"
ociLayout "github.com/containers/image/oci/layout"
"github.com/containers/image/openshift"
"github.com/containers/image/types"
)

// KnownTransports is a registry of known ImageTransport instances.
var KnownTransports map[string]types.ImageTransport
// knownTransports is a registry of known ImageTransport instances.
type knownTransports struct {
transports map[string]types.ImageTransport
mu sync.Mutex
}

func (kt *knownTransports) Get(k string) types.ImageTransport {
kt.mu.Lock()
t := kt.transports[k]
kt.mu.Unlock()
return t
}

func (kt *knownTransports) Remove(k string) {
kt.mu.Lock()
delete(kt.transports, k)
kt.mu.Unlock()
}

func (kt *knownTransports) Add(t types.ImageTransport) {
kt.mu.Lock()
defer kt.mu.Unlock()
name := t.Name()
if t := kt.transports[name]; t != nil {
panic(fmt.Sprintf("Duplicate image transport name %s", name))
}
kt.transports[name] = t
}

var kt *knownTransports

func init() {
KnownTransports = make(map[string]types.ImageTransport)
// NOTE: Make sure docs/policy.json.md is updated when adding or updating
// a transport.
for _, t := range []types.ImageTransport{
directory.Transport,
docker.Transport,
daemon.Transport,
ociLayout.Transport,
openshift.Transport,
} {
name := t.Name()
if _, ok := KnownTransports[name]; ok {
panic(fmt.Sprintf("Duplicate image transport name %s", name))
}
KnownTransports[name] = t
kt = &knownTransports{
transports: make(map[string]types.ImageTransport),
}
}

// ParseImageName converts a URL-like image name to a types.ImageReference.
func ParseImageName(imgName string) (types.ImageReference, error) {
parts := strings.SplitN(imgName, ":", 2)
if len(parts) != 2 {
return nil, fmt.Errorf(`Invalid image name "%s", expected colon-separated transport:reference`, imgName)
}
transport, ok := KnownTransports[parts[0]]
if !ok {
return nil, fmt.Errorf(`Invalid image name "%s", unknown transport "%s"`, imgName, parts[0])
}
return transport.ParseReference(parts[1])
// Get returns the transport specified by name or nil when unavailable.
func Get(name string) types.ImageTransport {
return kt.Get(name)
}

// Delete deletes a transport from the registered transports.
func Delete(name string) {
kt.Remove(name)
}

// Register registers a transport.
func Register(t types.ImageTransport) {
kt.Add(t)
}

// ImageName converts a types.ImageReference into an URL-like image name, which MUST be such that

31 vendor/github.com/containers/image/types/types.go (generated, vendored)
@@ -5,7 +5,8 @@ import (
"time"
|
||||
|
||||
"github.com/containers/image/docker/reference"
|
||||
"github.com/docker/distribution/digest"
|
||||
"github.com/opencontainers/go-digest"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// ImageTransport is a top-level namespace for ways to to store/load an image.
|
||||
@@ -109,7 +110,7 @@ type ImageSource interface {
|
||||
// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
|
||||
Reference() ImageReference
|
||||
// Close removes resources associated with an initialized ImageSource, if any.
|
||||
Close()
|
||||
Close() error
|
||||
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
|
||||
// It may use a remote (= slow) service.
|
||||
GetManifest() ([]byte, string, error)
|
||||
@@ -127,6 +128,7 @@ type ImageSource interface {
|
||||
//
|
||||
// There is a specific required order for some of the calls:
|
||||
// PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
|
||||
// ReapplyBlob, if used, MUST only be called if HasBlob returned true for the same blob digest
|
||||
// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
|
||||
// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
|
||||
//
|
||||
@@ -136,7 +138,7 @@ type ImageDestination interface {
 	// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
 	Reference() ImageReference
 	// Close removes resources associated with an initialized ImageDestination, if any.
-	Close()
+	Close() error

 	// SupportedManifestMIMETypes tells which manifest mime types the destination supports
 	// If an empty slice or nil is returned, then any mime type can be tried to upload
@@ -158,6 +160,13 @@
 	// to any other readers for download using the supplied digest.
 	// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
 	PutBlob(stream io.Reader, inputInfo BlobInfo) (BlobInfo, error)
+	// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
+	// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
+	// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
+	// it returns a non-nil error only on an unexpected failure.
+	HasBlob(info BlobInfo) (bool, int64, error)
+	// ReapplyBlob informs the image destination that a blob for which HasBlob previously returned true would have been passed to PutBlob if it had returned false. Like HasBlob and unlike PutBlob, the digest can not be empty. If the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree.
+	ReapplyBlob(info BlobInfo) (BlobInfo, error)
 	// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
 	PutManifest([]byte) error
 	PutSignatures(signatures [][]byte) error
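A sketch of the HasBlob/ReapplyBlob contract just added: check for an existing blob first, reapply it when present, and fall back to a full PutBlob otherwise. putOrReuseBlob is a hypothetical helper, not part of the library:

package example

import (
	"io"

	"github.com/containers/image/types"
)

// putOrReuseBlob uploads a blob only if the destination does not already have it.
func putOrReuseBlob(dest types.ImageDestination, stream io.Reader, info types.BlobInfo) (types.BlobInfo, error) {
	// info.Digest must be non-empty for HasBlob and ReapplyBlob.
	has, size, err := dest.HasBlob(info)
	if err != nil {
		return types.BlobInfo{}, err
	}
	if has {
		info.Size = size              // HasBlob reported the stored size
		return dest.ReapplyBlob(info) // only legal after HasBlob returned true
	}
	return dest.PutBlob(stream, info)
}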
@@ -178,7 +187,7 @@ type UnparsedImage interface {
 	// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
 	Reference() ImageReference
 	// Close removes resources associated with an initialized UnparsedImage, if any.
-	Close()
+	Close() error
 	// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
 	Manifest() ([]byte, string, error)
 	// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
@@ -196,6 +205,10 @@ type Image interface {
 	// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
 	// The result is cached; it is OK to call this however often you need.
 	ConfigBlob() ([]byte, error)
+	// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+	// layers in the resulting configuration isn't guaranteed to be returned due to how
+	// old image manifests work (docker v2s1 especially).
+	OCIConfig() (*v1.Image, error)
 	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
 	// The Digest field is guaranteed to be provided; Size may be -1.
 	// WARNING: The list may contain duplicates, and they are semantically relevant.
@@ -212,6 +225,9 @@ type Image interface {
 	UpdatedImage(options ManifestUpdateOptions) (Image, error)
 	// IsMultiImage returns true if the image's manifest is a list of images, false otherwise.
 	IsMultiImage() bool
+	// Size returns an approximation of the amount of disk space which is consumed by the image in its current
+	// location. If the size is not known, -1 will be returned.
+	Size() (int64, error)
 }

 // ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest
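For orientation, a sketch of read-only use of the Image methods touched by these hunks (OCIConfig, LayerInfos, Size); img is assumed to be an already-constructed types.Image, and describeImage is a hypothetical helper:

package example

import (
	"fmt"

	"github.com/containers/image/types"
)

// describeImage prints a summary of an image's configuration and layers.
func describeImage(img types.Image) error {
	cfg, err := img.OCIConfig() // OCI v1 view; layer info may be incomplete for docker v2s1
	if err != nil {
		return err
	}
	fmt.Println("architecture:", cfg.Architecture)
	for _, layer := range img.LayerInfos() { // root layer first; Size may be -1
		fmt.Println("layer:", layer.Digest, layer.Size)
	}
	size, err := img.Size() // approximate on-disk size, or -1 if unknown
	if err != nil {
		return err
	}
	fmt.Println("total size:", size)
	return nil
}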
@@ -284,3 +300,10 @@ type SystemContext struct {
 	// in order to not break any existing docker's integration tests.
 	DockerDisableV1Ping bool
 }
+
+// ProgressProperties is used to pass information from the copy code to a monitor which
+// can use the real-time information to produce output or react to changes.
+type ProgressProperties struct {
+	Artifact BlobInfo
+	Offset   uint64
+}
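A sketch of a monitor on the receiving end of ProgressProperties; how the channel gets wired up to the copy code is outside this snippet and assumed to be done by the caller:

package example

import (
	"fmt"

	"github.com/containers/image/types"
)

// watchProgress prints a line each time the copy code reports progress on a blob.
func watchProgress(ch <-chan types.ProgressProperties) {
	for p := range ch {
		fmt.Printf("%s: %d bytes transferred\n", p.Artifact.Digest, p.Offset)
	}
}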
31 vendor/github.com/containers/image/vendor.conf (generated, vendored, new file)
@@ -0,0 +1,31 @@
github.com/Sirupsen/logrus 7f4b1adc791766938c29457bed0703fb9134421a
github.com/containers/storage 5cbbc6bafb45bd7ef10486b673deb3b81bb3b787
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/distribution df5327f76fb6468b84a87771e361762b8be23fdb
github.com/docker/docker 75843d36aa5c3eaade50da005f9e0ff2602f3d5e
github.com/docker/go-connections 7da10c8c50cad14494ec818dcdfb6506265c0086
github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
github.com/ghodss/yaml 04f313413ffd65ce25f2541bfd2b2ceec5c0908c
github.com/gorilla/context 08b5f424b9271eedf6f9f0ce86cb9396ed337a42
github.com/gorilla/mux 94e7d24fd285520f3d12ae998f7fdd6b5393d453
github.com/imdario/mergo 50d4dbd4eb0e84778abe37cefef140271d96fade
github.com/mattn/go-runewidth 14207d285c6c197daabb5c9793d63e7af9ab2d50
github.com/mattn/go-shellwords 005a0944d84452842197c2108bd9168ced206f78
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc
github.com/opencontainers/image-spec v1.0.0-rc4
github.com/opencontainers/runc 6b1d0e76f239ffb435445e5ae316d2676c07c6e3
github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
github.com/pkg/errors 248dadf4e9068a0b3e79f02ed0a610d935de5302
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721
golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8
golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe
golang.org/x/sys 075e574b89e4c2d22f2286a7e2b919519c6f3547
gopkg.in/cheggaaa/pb.v1 d7e6ca3010b6f084d8056847f55d7f572f180678
gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a
k8s.io/client-go bcde30fb7eaed76fd98a36b4120321b94995ffb6
github.com/xeipuuv/gojsonschema master
@@ -176,7 +176,7 @@

 END OF TERMS AND CONDITIONS

-Copyright 2015-2016 Docker, Inc.
+Copyright 2013-2016 Docker, Inc.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
19 vendor/github.com/containers/storage/NOTICE (generated, vendored, new file)
@@ -0,0 +1,19 @@
Docker
Copyright 2012-2016 Docker, Inc.

This product includes software developed at Docker, Inc. (https://www.docker.com).

This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.

The following is courtesy of our legal counsel:


Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see https://www.bis.doc.gov

See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
43 vendor/github.com/containers/storage/README.md (generated, vendored, new file)
@@ -0,0 +1,43 @@
`storage` is a Go library which aims to provide methods for storing filesystem
layers, container images, and containers. An `oci-storage` CLI wrapper is also
included for manual and scripting use.

To build the CLI wrapper, use 'make build-binary'.

Operations which use VMs expect to launch them using 'vagrant', defaulting to
using its 'libvirt' provider. The boxes used are also available for the
'virtualbox' provider, and can be selected by setting $VAGRANT_PROVIDER to
'virtualbox' before kicking off the build.

The library manages three types of items: layers, images, and containers.

A *layer* is a copy-on-write filesystem which is notionally stored as a set of
changes relative to its *parent* layer, if it has one. A given layer can only
have one parent, but any layer can be the parent of multiple layers. Layers
which are parents of other layers should be treated as read-only.

An *image* is a reference to a particular layer (its _top_ layer), along with
other information which the library can manage for the convenience of its
caller. This information typically includes configuration templates for
running a binary contained within the image's layers, and may include
cryptographic signatures. Multiple images can reference the same layer, as the
differences between two images may not be in their layer contents.

A *container* is a read-write layer which is a child of an image's top layer,
along with information which the library can manage for the convenience of its
caller. This information typically includes configuration information for
running the specific container. Multiple containers can be derived from a
single image.

Layers, images, and containers are represented primarily by 32 character
hexadecimal IDs, but items of each kind can also have one or more arbitrary
names attached to them, which the library will automatically resolve to IDs
when they are passed in to API calls which expect IDs.

The library can store what it calls *metadata* for each of these types of
items. This is expected to be a small piece of data, since it is cached in
memory and stored along with the library's own bookkeeping information.

Additionally, the library can store one or more of what it calls *big data* for
images and containers. This is a named chunk of larger data, which is only in
memory when it is being read from or being written to its own disk file.
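The layer/image/container relationships the README describes can be summarized in a toy Go model; this is an illustration of the data model only, with hypothetical type names, not the library's actual API:

package example

// Layer is a copy-on-write filesystem, stored as changes against its parent.
type Layer struct {
	ID     string // 32-character hexadecimal in the real library
	Parent string // "" for a base layer; one parent, many possible children
}

// Image points at a top layer plus caller-managed information.
type Image struct {
	ID       string
	TopLayer string // multiple images may share the same top layer
}

// Container is a read-write layer derived from an image's top layer.
type Container struct {
	ID      string
	ImageID string
	LayerID string // the container's private read-write layer
}

// resolve mimics the name resolution the README mentions: API calls accept
// either a name or an ID, and names are resolved to IDs automatically.
func resolve(names map[string]string, nameOrID string) string {
	if id, ok := names[nameOrID]; ok {
		return id
	}
	return nameOrID // assume the caller already passed an ID
}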