From 6e2cd739da4fd4c4834f35d21c5e26e378f75212 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miloslav=20Trma=C4=8D?= Date: Thu, 4 Aug 2016 19:27:28 +0200 Subject: [PATCH 1/2] Vendor after merging mtrmac/image:PutBlob-error-handling --- .../image/directory/directory_dest.go | 29 ++++++++++++++++--- .../image/directory/directory_transport.go | 6 +++- .../image/docker/docker_image_dest.go | 5 ++++ .../containers/image/oci/oci_dest.go | 27 ++++++++++++++--- .../containers/image/oci/oci_transport.go | 4 +++ .../containers/image/openshift/openshift.go | 5 ++++ .../containers/image/types/types.go | 4 +++ 7 files changed, 71 insertions(+), 9 deletions(-) diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go index 9cee862a..fb5ff757 100644 --- a/vendor/github.com/containers/image/directory/directory_dest.go +++ b/vendor/github.com/containers/image/directory/directory_dest.go @@ -4,6 +4,7 @@ import ( "io" "io/ioutil" "os" + "path/filepath" "github.com/containers/image/types" ) @@ -31,18 +32,38 @@ func (d *dirImageDestination) PutManifest(manifest []byte) error { return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644) } +// PutBlob writes contents of stream as a blob identified by digest. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +// Note: Calling PutBlob() and other methods may have ordering dependencies WRT other methods of this type. FIXME: Figure out and document. 
func (d *dirImageDestination) PutBlob(digest string, stream io.Reader) error { - layerFile, err := os.Create(d.ref.layerPath(digest)) + blobPath := d.ref.layerPath(digest) + blobFile, err := ioutil.TempFile(filepath.Dir(blobPath), filepath.Base(blobPath)) if err != nil { return err } - defer layerFile.Close() - if _, err := io.Copy(layerFile, stream); err != nil { + succeeded := false + defer func() { + blobFile.Close() + if !succeeded { + os.Remove(blobFile.Name()) + } + }() + + if _, err := io.Copy(blobFile, stream); err != nil { return err } - if err := layerFile.Sync(); err != nil { + if err := blobFile.Sync(); err != nil { return err } + if err := blobFile.Chmod(0644); err != nil { + return err + } + if err := os.Rename(blobFile.Name(), blobPath); err != nil { + return err + } + succeeded = true return nil } diff --git a/vendor/github.com/containers/image/directory/directory_transport.go b/vendor/github.com/containers/image/directory/directory_transport.go index a6b7bed0..c5547f7c 100644 --- a/vendor/github.com/containers/image/directory/directory_transport.go +++ b/vendor/github.com/containers/image/directory/directory_transport.go @@ -32,13 +32,17 @@ func (t dirTransport) ParseReference(reference string) (types.ImageReference, er // scope passed to this function will not be "", that value is always allowed. func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error { if !strings.HasPrefix(scope, "/") { - return fmt.Errorf("Invalid scope %s: must be an absolute path", scope) + return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope) } // Refuse also "/", otherwise "/" and "" would have the same semantics, // and "" could be unexpectedly shadowed by the "/" entry. 
if scope == "/" { return errors.New(`Invalid scope "/": Use the generic default scope ""`) } + cleaned := filepath.Clean(scope) + if cleaned != scope { + return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned) + } return nil } diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go index 818c8409..86a38e64 100644 --- a/vendor/github.com/containers/image/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/docker/docker_image_dest.go @@ -74,6 +74,11 @@ func (d *dockerImageDestination) PutManifest(m []byte) error { return nil } +// PutBlob writes contents of stream as a blob identified by digest. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +// Note: Calling PutBlob() and other methods may have ordering dependencies WRT other methods of this type. FIXME: Figure out and document. func (d *dockerImageDestination) PutBlob(digest string, stream io.Reader) error { checkURL := fmt.Sprintf(blobsURL, d.ref.ref.RemoteName(), digest) diff --git a/vendor/github.com/containers/image/oci/oci_dest.go b/vendor/github.com/containers/image/oci/oci_dest.go index 6509aba2..163d5004 100644 --- a/vendor/github.com/containers/image/oci/oci_dest.go +++ b/vendor/github.com/containers/image/oci/oci_dest.go @@ -110,22 +110,41 @@ func (d *ociImageDestination) PutManifest(m []byte) error { return ioutil.WriteFile(descriptorPath, data, 0644) } +// PutBlob writes contents of stream as a blob identified by digest. +// WARNING: The contents of stream are being verified on the fly. 
Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +// Note: Calling PutBlob() and other methods may have ordering dependencies WRT other methods of this type. FIXME: Figure out and document. func (d *ociImageDestination) PutBlob(digest string, stream io.Reader) error { blobPath := d.ref.blobPath(digest) if err := ensureParentDirectoryExists(blobPath); err != nil { return err } - blob, err := os.Create(blobPath) + blobFile, err := ioutil.TempFile(filepath.Dir(blobPath), filepath.Base(blobPath)) if err != nil { return err } - defer blob.Close() - if _, err := io.Copy(blob, stream); err != nil { + succeeded := false + defer func() { + blobFile.Close() + if !succeeded { + os.Remove(blobFile.Name()) + } + }() + + if _, err := io.Copy(blobFile, stream); err != nil { return err } - if err := blob.Sync(); err != nil { + if err := blobFile.Sync(); err != nil { return err } + if err := blobFile.Chmod(0644); err != nil { + return err + } + if err := os.Rename(blobFile.Name(), blobPath); err != nil { + return err + } + succeeded = true return nil } diff --git a/vendor/github.com/containers/image/oci/oci_transport.go b/vendor/github.com/containers/image/oci/oci_transport.go index 1ec28f57..f3afd221 100644 --- a/vendor/github.com/containers/image/oci/oci_transport.go +++ b/vendor/github.com/containers/image/oci/oci_transport.go @@ -58,6 +58,10 @@ func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error { if scope == "/" { return errors.New(`Invalid scope "/": Use the generic default scope ""`) } + cleaned := filepath.Clean(dir) + if cleaned != dir { + return fmt.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) + } return nil } diff --git 
a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go index 659f4783..17ae5c43 100644 --- a/vendor/github.com/containers/image/openshift/openshift.go +++ b/vendor/github.com/containers/image/openshift/openshift.go @@ -368,6 +368,11 @@ func (d *openshiftImageDestination) PutManifest(m []byte) error { return nil } +// PutBlob writes contents of stream as a blob identified by digest. +// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available +// to any other readers for download using the supplied digest. +// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. +// Note: Calling PutBlob() and other methods may have ordering dependencies WRT other methods of this type. FIXME: Figure out and document. func (d *openshiftImageDestination) PutBlob(digest string, stream io.Reader) error { return d.docker.PutBlob(digest, stream) } diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go index 086e0080..b67e6b30 100644 --- a/vendor/github.com/containers/image/types/types.go +++ b/vendor/github.com/containers/image/types/types.go @@ -104,6 +104,10 @@ type ImageDestination interface { Reference() ImageReference // FIXME? This should also receive a MIME type if known, to differentiate between schema versions. PutManifest([]byte) error + // PutBlob writes contents of stream as a blob identified by digest. + // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available + // to any other readers for download using the supplied digest. + // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. 
// Note: Calling PutBlob() and other methods may have ordering dependencies WRT other methods of this type. FIXME: Figure out and document. PutBlob(digest string, stream io.Reader) error PutSignatures(signatures [][]byte) error From 23c96cb998a0c1527aa15cb60c0bda566b12348e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miloslav=20Trma=C4=8D?= Date: Thu, 4 Aug 2016 15:59:18 +0200 Subject: [PATCH 2/2] Verify blobs against the expected digests while copying them. Note that this requires ImageDestination.PutBlob to fail and delete any unfinished data if stream.Read() fails. We do not have to trust PutBlob to correctly handle a validation error, so we don't; but we can't do the storage cleanup for PutBlob. --- cmd/skopeo/copy.go | 81 ++++++++++++++++++++++++++++++++++++++++- cmd/skopeo/copy_test.go | 62 +++++++++++++++++++++++++++++++ 2 files changed, 142 insertions(+), 1 deletion(-) create mode 100644 cmd/skopeo/copy_test.go diff --git a/cmd/skopeo/copy.go b/cmd/skopeo/copy.go index b5aa1b73..6af7ff86 100644 --- a/cmd/skopeo/copy.go +++ b/cmd/skopeo/copy.go @@ -1,8 +1,14 @@ package main import ( + "crypto/sha256" + "crypto/subtle" + "encoding/hex" "errors" "fmt" + "hash" + "io" + "strings" "github.com/containers/image/image" "github.com/containers/image/signature" @@ -10,6 +16,65 @@ import ( "github.com/urfave/cli" ) +// supportedDigests lists the supported blob digest types. +var supportedDigests = map[string]func() hash.Hash{ + "sha256": sha256.New, +} + +type digestingReader struct { + source io.Reader + digest hash.Hash + expectedDigest []byte + failureIndicator *bool +} + +// newDigestingReader returns an io.Reader with contents of source, which will eventually return a non-EOF error +// and set *failureIndicator to true if the source stream does not match expectedDigestString. 
+func newDigestingReader(source io.Reader, expectedDigestString string, failureIndicator *bool) (io.Reader, error) { + fields := strings.SplitN(expectedDigestString, ":", 2) + if len(fields) != 2 { + return nil, fmt.Errorf("Invalid digest specification %s", expectedDigestString) + } + fn, ok := supportedDigests[fields[0]] + if !ok { + return nil, fmt.Errorf("Invalid digest specification %s: unknown digest type %s", expectedDigestString, fields[0]) + } + digest := fn() + expectedDigest, err := hex.DecodeString(fields[1]) + if err != nil { + return nil, fmt.Errorf("Invalid digest value %s: %v", expectedDigestString, err) + } + if len(expectedDigest) != digest.Size() { + return nil, fmt.Errorf("Invalid digest specification %s: length %d does not match %d", expectedDigestString, len(expectedDigest), digest.Size()) + } + return &digestingReader{ + source: source, + digest: digest, + expectedDigest: expectedDigest, + failureIndicator: failureIndicator, + }, nil +} + +func (d *digestingReader) Read(p []byte) (int, error) { + n, err := d.source.Read(p) + if n > 0 { + if n2, err := d.digest.Write(p[:n]); n2 != n || err != nil { + // Coverage: This should not happen, the hash.Hash interface requires + // d.digest.Write to never return an error, and the io.Writer interface + // requires n2 == len(input) if no error is returned. + return 0, fmt.Errorf("Error updating digest during verification: %d vs. 
%d, %v", n2, n, err) + } + } + if err == io.EOF { + actualDigest := d.digest.Sum(nil) + if subtle.ConstantTimeCompare(actualDigest, d.expectedDigest) != 1 { + *d.failureIndicator = true + return 0, fmt.Errorf("Digest did not match, expected %s, got %s", hex.EncodeToString(d.expectedDigest), hex.EncodeToString(actualDigest)) + } + } + return n, err +} + func copyHandler(context *cli.Context) error { if len(context.Args()) != 2 { return errors.New("Usage: copy source destination") @@ -44,9 +109,23 @@ func copyHandler(context *cli.Context) error { return fmt.Errorf("Error reading blob %s: %v", digest, err) } defer stream.Close() - if err := dest.PutBlob(digest, stream); err != nil { + + // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader, + // use a separate validation failure indicator. + // Note that we don't use a stronger "validationSucceeded" indicator, because + // dest.PutBlob may detect that the layer already exists, in which case we don't + // read stream to the end, and validation does not happen. + validationFailed := false // This is a new instance on each loop iteration. + digestingReader, err := newDigestingReader(stream, digest, &validationFailed) + if err != nil { + return fmt.Errorf("Error preparing to verify blob %s: %v", digest, err) + } + if err := dest.PutBlob(digest, digestingReader); err != nil { return fmt.Errorf("Error writing blob: %v", err) } + if validationFailed { // Coverage: This should never happen. 
+ return fmt.Errorf("Internal error uploading blob %s, digest verification failed but was ignored", digest) + } } sigs, err := src.Signatures() diff --git a/cmd/skopeo/copy_test.go b/cmd/skopeo/copy_test.go new file mode 100644 index 00000000..6601e069 --- /dev/null +++ b/cmd/skopeo/copy_test.go @@ -0,0 +1,62 @@ +package main + +import ( + "bytes" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNewDigestingReader(t *testing.T) { + // Only the failure cases, success is tested in TestDigestingReaderRead below. + source := bytes.NewReader([]byte("abc")) + for _, input := range []string{ + "abc", // Not algo:hexvalue + "crc32:", // Unknown algorithm, empty value + "crc32:012345678", // Unknown algorithm + "sha256:", // Empty value + "sha256:0", // Invalid hex value + "sha256:01", // Invalid length of hex value + } { + validationFailed := false + _, err := newDigestingReader(source, input, &validationFailed) + assert.Error(t, err, input) + } +} + +func TestDigestingReaderRead(t *testing.T) { + cases := []struct { + input []byte + digest string + }{ + {[]byte(""), "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"}, + {[]byte("abc"), "sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"}, + {make([]byte, 65537, 65537), "sha256:3266304f31be278d06c3bd3eb9aa3e00c59bedec0a890de466568b0b90b0e01f"}, + } + // Valid input + for _, c := range cases { + source := bytes.NewReader(c.input) + validationFailed := false + reader, err := newDigestingReader(source, c.digest, &validationFailed) + require.NoError(t, err, c.digest) + dest := bytes.Buffer{} + n, err := io.Copy(&dest, reader) + assert.NoError(t, err, c.digest) + assert.Equal(t, int64(len(c.input)), n, c.digest) + assert.Equal(t, c.input, dest.Bytes(), c.digest) + assert.False(t, validationFailed, c.digest) + } + // Modified input + for _, c := range cases { + source := bytes.NewReader(bytes.Join([][]byte{c.input, 
[]byte("x")}, nil)) + validationFailed := false + reader, err := newDigestingReader(source, c.digest, &validationFailed) + require.NoError(t, err, c.digest) + dest := bytes.Buffer{} + _, err = io.Copy(&dest, reader) + assert.Error(t, err, c.digest) + assert.True(t, validationFailed) + } +}