diff --git a/cmd/skopeo/copy.go b/cmd/skopeo/copy.go index 94648176..3191402a 100644 --- a/cmd/skopeo/copy.go +++ b/cmd/skopeo/copy.go @@ -1,80 +1,14 @@ package main import ( - "crypto/sha256" - "crypto/subtle" - "encoding/hex" "errors" "fmt" - "hash" - "io" - "strings" - "github.com/containers/image/image" - "github.com/containers/image/signature" + "github.com/containers/image/copy" "github.com/containers/image/transports" "github.com/urfave/cli" ) -// supportedDigests lists the supported blob digest types. -var supportedDigests = map[string]func() hash.Hash{ - "sha256": sha256.New, -} - -type digestingReader struct { - source io.Reader - digest hash.Hash - expectedDigest []byte - failureIndicator *bool -} - -// newDigestingReader returns an io.Reader with contents of source, which will eventually return a non-EOF error -// and set *failureIndicator to true if the source stream does not match expectedDigestString. -func newDigestingReader(source io.Reader, expectedDigestString string, failureIndicator *bool) (io.Reader, error) { - fields := strings.SplitN(expectedDigestString, ":", 2) - if len(fields) != 2 { - return nil, fmt.Errorf("Invalid digest specification %s", expectedDigestString) - } - fn, ok := supportedDigests[fields[0]] - if !ok { - return nil, fmt.Errorf("Invalid digest specification %s: unknown digest type %s", expectedDigestString, fields[0]) - } - digest := fn() - expectedDigest, err := hex.DecodeString(fields[1]) - if err != nil { - return nil, fmt.Errorf("Invalid digest value %s: %v", expectedDigestString, err) - } - if len(expectedDigest) != digest.Size() { - return nil, fmt.Errorf("Invalid digest specification %s: length %d does not match %d", expectedDigestString, len(expectedDigest), digest.Size()) - } - return &digestingReader{ - source: source, - digest: digest, - expectedDigest: expectedDigest, - failureIndicator: failureIndicator, - }, nil -} - -func (d *digestingReader) Read(p []byte) (int, error) { - n, err := d.source.Read(p) - if n > 
0 { - if n2, err := d.digest.Write(p[:n]); n2 != n || err != nil { - // Coverage: This should not happen, the hash.Hash interface requires - // d.digest.Write to never return an error, and the io.Writer interface - // requires n2 == len(input) if no error is returned. - return 0, fmt.Errorf("Error updating digest during verification: %d vs. %d, %v", n2, n, err) - } - } - if err == io.EOF { - actualDigest := d.digest.Sum(nil) - if subtle.ConstantTimeCompare(actualDigest, d.expectedDigest) != 1 { - *d.failureIndicator = true - return 0, fmt.Errorf("Digest did not match, expected %s, got %s", hex.EncodeToString(d.expectedDigest), hex.EncodeToString(actualDigest)) - } - } - return n, err -} - func copyHandler(context *cli.Context) error { if len(context.Args()) != 2 { return errors.New("Usage: copy source destination") @@ -86,90 +20,19 @@ func copyHandler(context *cli.Context) error { } defer policyContext.Destroy() - dest, err := parseImageDestination(context, context.Args()[1]) + srcRef, err := transports.ParseImageName(context.Args()[0]) if err != nil { - return fmt.Errorf("Error initializing %s: %v", context.Args()[1], err) + return fmt.Errorf("Invalid source name %s: %v", context.Args()[0], err) } - - rawSource, err := parseImageSource(context, context.Args()[0], dest.SupportedManifestMIMETypes()) + destRef, err := transports.ParseImageName(context.Args()[1]) if err != nil { - return fmt.Errorf("Error initializing %s: %v", context.Args()[0], err) + return fmt.Errorf("Invalid destination name %s: %v", context.Args()[1], err) } - src := image.FromSource(rawSource) - signBy := context.String("sign-by") - // Please keep this policy check BEFORE reading any other information about the image. - if allowed, err := policyContext.IsRunningImageAllowed(src); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. 
- return fmt.Errorf("Source image rejected: %v", err) - } - - manifest, _, err := src.Manifest() - if err != nil { - return fmt.Errorf("Error reading manifest: %v", err) - } - - sigs, err := src.Signatures() - if err != nil { - return fmt.Errorf("Error reading signatures: %v", err) - } - - blobDigests, err := src.BlobDigests() - if err != nil { - return fmt.Errorf("Error parsing manifest: %v", err) - } - for _, digest := range blobDigests { - // TODO(mitr): do not ignore the size param returned here - stream, _, err := rawSource.GetBlob(digest) - if err != nil { - return fmt.Errorf("Error reading blob %s: %v", digest, err) - } - defer stream.Close() - - // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader, - // use a separate validation failure indicator. - // Note that we don't use a stronger "validationSucceeded" indicator, because - // dest.PutBlob may detect that the layer already exists, in which case we don't - // read stream to the end, and validation does not happen. - validationFailed := false // This is a new instance on each loop iteration. - digestingReader, err := newDigestingReader(stream, digest, &validationFailed) - if err != nil { - return fmt.Errorf("Error preparing to verify blob %s: %v", digest, err) - } - if err := dest.PutBlob(digest, digestingReader); err != nil { - return fmt.Errorf("Error writing blob: %v", err) - } - if validationFailed { // Coverage: This should never happen. 
- return fmt.Errorf("Internal error uploading blob %s, digest verification failed but was ignored", digest) - } - } - - if signBy != "" { - mech, err := signature.NewGPGSigningMechanism() - if err != nil { - return fmt.Errorf("Error initializing GPG: %v", err) - } - dockerReference := dest.Reference().DockerReference() - if dockerReference == nil { - return fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference())) - } - - newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, signBy) - if err != nil { - return fmt.Errorf("Error creating signature: %v", err) - } - sigs = append(sigs, newSig) - } - - // FIXME: We need to call PutManifest after PutBlob and before PutSignatures. This seems ugly; move to a "set properties" + "commit" model? - if err := dest.PutManifest(manifest); err != nil { - return fmt.Errorf("Error writing manifest: %v", err) - } - - if err := dest.PutSignatures(sigs); err != nil { - return fmt.Errorf("Error writing signatures: %v", err) - } - return nil + return copy.Image(contextFromGlobalOptions(context), policyContext, destRef, srcRef, &copy.Options{ + SignBy: signBy, + }) } var copyCmd = cli.Command{ diff --git a/cmd/skopeo/copy_test.go b/cmd/skopeo/copy_test.go deleted file mode 100644 index 6601e069..00000000 --- a/cmd/skopeo/copy_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package main - -import ( - "bytes" - "io" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewDigestingReader(t *testing.T) { - // Only the failure cases, success is tested in TestDigestingReaderRead below. 
- source := bytes.NewReader([]byte("abc")) - for _, input := range []string{ - "abc", // Not algo:hexvalue - "crc32:", // Unknown algorithm, empty value - "crc32:012345678", // Unknown algorithm - "sha256:", // Empty value - "sha256:0", // Invalid hex value - "sha256:01", // Invalid length of hex value - } { - validationFailed := false - _, err := newDigestingReader(source, input, &validationFailed) - assert.Error(t, err, input) - } -} - -func TestDigestingReaderRead(t *testing.T) { - cases := []struct { - input []byte - digest string - }{ - {[]byte(""), "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, - {[]byte("abc"), "sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"}, - {make([]byte, 65537, 65537), "sha256:3266304f31be278d06c3bd3eb9aa3e00c59bedec0a890de466568b0b90b0e01f"}, - } - // Valid input - for _, c := range cases { - source := bytes.NewReader(c.input) - validationFailed := false - reader, err := newDigestingReader(source, c.digest, &validationFailed) - require.NoError(t, err, c.digest) - dest := bytes.Buffer{} - n, err := io.Copy(&dest, reader) - assert.NoError(t, err, c.digest) - assert.Equal(t, int64(len(c.input)), n, c.digest) - assert.Equal(t, c.input, dest.Bytes(), c.digest) - assert.False(t, validationFailed, c.digest) - } - // Modified input - for _, c := range cases { - source := bytes.NewReader(bytes.Join([][]byte{c.input, []byte("x")}, nil)) - validationFailed := false - reader, err := newDigestingReader(source, c.digest, &validationFailed) - require.NoError(t, err, c.digest) - dest := bytes.Buffer{} - _, err = io.Copy(&dest, reader) - assert.Error(t, err, c.digest) - assert.True(t, validationFailed) - } -} diff --git a/cmd/skopeo/utils.go b/cmd/skopeo/utils.go index d73b74e9..a646e9a1 100644 --- a/cmd/skopeo/utils.go +++ b/cmd/skopeo/utils.go @@ -35,12 +35,3 @@ func parseImageSource(c *cli.Context, name string, requestedManifestMIMETypes [] } return 
ref.NewImageSource(contextFromGlobalOptions(c), requestedManifestMIMETypes) } - -// parseImageDestination converts image URL-like string to an ImageDestination. -func parseImageDestination(c *cli.Context, name string) (types.ImageDestination, error) { - ref, err := transports.ParseImageName(name) - if err != nil { - return nil, err - } - return ref.NewImageDestination(contextFromGlobalOptions(c)) -} diff --git a/hack/.vendor-helpers.sh b/hack/.vendor-helpers.sh index 1a1d5db0..27b2f805 100755 --- a/hack/.vendor-helpers.sh +++ b/hack/.vendor-helpers.sh @@ -36,7 +36,7 @@ clone() { case "$vcs" in git) git clone --quiet --no-checkout "$url" "$target" - ( cd "$target" && git checkout --quiet "$rev" && git reset --quiet --hard "$rev" ) + ( cd "$target" && git checkout --quiet "$rev" && git reset --quiet --hard "$rev" -- ) ;; hg) hg clone --quiet --updaterev "$rev" "$url" "$target" diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go new file mode 100644 index 00000000..79fd3b18 --- /dev/null +++ b/vendor/github.com/containers/image/copy/copy.go @@ -0,0 +1,164 @@ +package copy + +import ( + "crypto/sha256" + "crypto/subtle" + "encoding/hex" + "fmt" + "hash" + "io" + "strings" + + "github.com/containers/image/image" + "github.com/containers/image/signature" + "github.com/containers/image/transports" + "github.com/containers/image/types" +) + +// supportedDigests lists the supported blob digest types. +var supportedDigests = map[string]func() hash.Hash{ + "sha256": sha256.New, +} + +type digestingReader struct { + source io.Reader + digest hash.Hash + expectedDigest []byte + validationFailed bool +} + +// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error +// and set validationFailed to true if the source stream does not match expectedDigestString. 
+func newDigestingReader(source io.Reader, expectedDigestString string) (*digestingReader, error) { + fields := strings.SplitN(expectedDigestString, ":", 2) + if len(fields) != 2 { + return nil, fmt.Errorf("Invalid digest specification %s", expectedDigestString) + } + fn, ok := supportedDigests[fields[0]] + if !ok { + return nil, fmt.Errorf("Invalid digest specification %s: unknown digest type %s", expectedDigestString, fields[0]) + } + digest := fn() + expectedDigest, err := hex.DecodeString(fields[1]) + if err != nil { + return nil, fmt.Errorf("Invalid digest value %s: %v", expectedDigestString, err) + } + if len(expectedDigest) != digest.Size() { + return nil, fmt.Errorf("Invalid digest specification %s: length %d does not match %d", expectedDigestString, len(expectedDigest), digest.Size()) + } + return &digestingReader{ + source: source, + digest: digest, + expectedDigest: expectedDigest, + validationFailed: false, + }, nil +} + +func (d *digestingReader) Read(p []byte) (int, error) { + n, err := d.source.Read(p) + if n > 0 { + if n2, err := d.digest.Write(p[:n]); n2 != n || err != nil { + // Coverage: This should not happen, the hash.Hash interface requires + // d.digest.Write to never return an error, and the io.Writer interface + // requires n2 == len(input) if no error is returned. + return 0, fmt.Errorf("Error updating digest during verification: %d vs. %d, %v", n2, n, err) + } + } + if err == io.EOF { + actualDigest := d.digest.Sum(nil) + if subtle.ConstantTimeCompare(actualDigest, d.expectedDigest) != 1 { + d.validationFailed = true + return 0, fmt.Errorf("Digest did not match, expected %s, got %s", hex.EncodeToString(d.expectedDigest), hex.EncodeToString(actualDigest)) + } + } + return n, err +} + +// Options allows supplying non-default configuration modifying the behavior of CopyImage. 
+type Options struct { + SignBy string // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(), +} + +// Image copies image from srcRef to destRef, using policyContext to validate source image admissibility. +func Image(ctx *types.SystemContext, policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) error { + dest, err := destRef.NewImageDestination(ctx) + if err != nil { + return fmt.Errorf("Error initializing destination %s: %v", transports.ImageName(destRef), err) + } + rawSource, err := srcRef.NewImageSource(ctx, dest.SupportedManifestMIMETypes()) + if err != nil { + return fmt.Errorf("Error initializing source %s: %v", transports.ImageName(srcRef), err) + } + src := image.FromSource(rawSource) + + // Please keep this policy check BEFORE reading any other information about the image. + if allowed, err := policyContext.IsRunningImageAllowed(src); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. + return fmt.Errorf("Source image rejected: %v", err) + } + + manifest, _, err := src.Manifest() + if err != nil { + return fmt.Errorf("Error reading manifest: %v", err) + } + + sigs, err := src.Signatures() + if err != nil { + return fmt.Errorf("Error reading signatures: %v", err) + } + + blobDigests, err := src.BlobDigests() + if err != nil { + return fmt.Errorf("Error parsing manifest: %v", err) + } + for _, digest := range blobDigests { + // TODO(mitr): do not ignore the size param returned here + stream, _, err := rawSource.GetBlob(digest) + if err != nil { + return fmt.Errorf("Error reading blob %s: %v", digest, err) + } + defer stream.Close() + + // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader, + // use a separate validation failure indicator. 
+ // Note that we don't use a stronger "validationSucceeded" indicator, because + // dest.PutBlob may detect that the layer already exists, in which case we don't + // read stream to the end, and validation does not happen. + digestingReader, err := newDigestingReader(stream, digest) + if err != nil { + return fmt.Errorf("Error preparing to verify blob %s: %v", digest, err) + } + if err := dest.PutBlob(digest, digestingReader); err != nil { + return fmt.Errorf("Error writing blob: %v", err) + } + if digestingReader.validationFailed { // Coverage: This should never happen. + return fmt.Errorf("Internal error uploading blob %s, digest verification failed but was ignored", digest) + } + } + + if options != nil && options.SignBy != "" { + mech, err := signature.NewGPGSigningMechanism() + if err != nil { + return fmt.Errorf("Error initializing GPG: %v", err) + } + dockerReference := dest.Reference().DockerReference() + if dockerReference == nil { + return fmt.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference())) + } + + newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, options.SignBy) + if err != nil { + return fmt.Errorf("Error creating signature: %v", err) + } + sigs = append(sigs, newSig) + } + + // FIXME: We need to call PutManifest after PutBlob and before PutSignatures. This seems ugly; move to a "set properties" + "commit" model? + if err := dest.PutManifest(manifest); err != nil { + return fmt.Errorf("Error writing manifest: %v", err) + } + + if err := dest.PutSignatures(sigs); err != nil { + return fmt.Errorf("Error writing signatures: %v", err) + } + return nil +}