Mirror of https://github.com/containers/skopeo.git (synced 2025-08-10 02:48:12 +00:00)

commit 09399a9ac1
cmd/skopeo/copy.go

@@ -1,8 +1,14 @@
 package main
 
 import (
+    "crypto/sha256"
+    "crypto/subtle"
+    "encoding/hex"
     "errors"
     "fmt"
+    "hash"
+    "io"
+    "strings"
 
     "github.com/containers/image/image"
     "github.com/containers/image/signature"
@@ -10,6 +16,65 @@ import (
     "github.com/urfave/cli"
 )
 
+// supportedDigests lists the supported blob digest types.
+var supportedDigests = map[string]func() hash.Hash{
+    "sha256": sha256.New,
+}
+
+type digestingReader struct {
+    source           io.Reader
+    digest           hash.Hash
+    expectedDigest   []byte
+    failureIndicator *bool
+}
+
+// newDigestingReader returns an io.Reader with contents of source, which will eventually return a non-EOF error
+// and set *failureIndicator to true if the source stream does not match expectedDigestString.
+func newDigestingReader(source io.Reader, expectedDigestString string, failureIndicator *bool) (io.Reader, error) {
+    fields := strings.SplitN(expectedDigestString, ":", 2)
+    if len(fields) != 2 {
+        return nil, fmt.Errorf("Invalid digest specification %s", expectedDigestString)
+    }
+    fn, ok := supportedDigests[fields[0]]
+    if !ok {
+        return nil, fmt.Errorf("Invalid digest specification %s: unknown digest type %s", expectedDigestString, fields[0])
+    }
+    digest := fn()
+    expectedDigest, err := hex.DecodeString(fields[1])
+    if err != nil {
+        return nil, fmt.Errorf("Invalid digest value %s: %v", expectedDigestString, err)
+    }
+    if len(expectedDigest) != digest.Size() {
+        return nil, fmt.Errorf("Invalid digest specification %s: length %d does not match %d", expectedDigestString, len(expectedDigest), digest.Size())
+    }
+    return &digestingReader{
+        source:           source,
+        digest:           digest,
+        expectedDigest:   expectedDigest,
+        failureIndicator: failureIndicator,
+    }, nil
+}
+
+func (d *digestingReader) Read(p []byte) (int, error) {
+    n, err := d.source.Read(p)
+    if n > 0 {
+        if n2, err := d.digest.Write(p[:n]); n2 != n || err != nil {
+            // Coverage: This should not happen, the hash.Hash interface requires
+            // d.digest.Write to never return an error, and the io.Writer interface
+            // requires n2 == len(input) if no error is returned.
+            return 0, fmt.Errorf("Error updating digest during verification: %d vs. %d, %v", n2, n, err)
+        }
+    }
+    if err == io.EOF {
+        actualDigest := d.digest.Sum(nil)
+        if subtle.ConstantTimeCompare(actualDigest, d.expectedDigest) != 1 {
+            *d.failureIndicator = true
+            return 0, fmt.Errorf("Digest did not match, expected %s, got %s", hex.EncodeToString(d.expectedDigest), hex.EncodeToString(actualDigest))
+        }
+    }
+    return n, err
+}
+
 func copyHandler(context *cli.Context) error {
     if len(context.Args()) != 2 {
         return errors.New("Usage: copy source destination")
@@ -44,9 +109,23 @@ func copyHandler(context *cli.Context) error {
             return fmt.Errorf("Error reading blob %s: %v", digest, err)
         }
         defer stream.Close()
-        if err := dest.PutBlob(digest, stream); err != nil {
+
+        // Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
+        // use a separate validation failure indicator.
+        // Note that we don't use a stronger "validationSucceeded" indicator, because
+        // dest.PutBlob may detect that the layer already exists, in which case we don't
+        // read stream to the end, and validation does not happen.
+        validationFailed := false // This is a new instance on each loop iteration.
+        digestingReader, err := newDigestingReader(stream, digest, &validationFailed)
+        if err != nil {
+            return fmt.Errorf("Error preparing to verify blob %s: %v", digest, err)
+        }
+        if err := dest.PutBlob(digest, digestingReader); err != nil {
             return fmt.Errorf("Error writing blob: %v", err)
         }
+        if validationFailed { // Coverage: This should never happen.
+            return fmt.Errorf("Internal error uploading blob %s, digest verification failed but was ignored", digest)
+        }
     }
 
     sigs, err := src.Signatures()
cmd/skopeo/copy_test.go (new file, 62 lines)

@@ -0,0 +1,62 @@
+package main
+
+import (
+    "bytes"
+    "io"
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+)
+
+func TestNewDigestingReader(t *testing.T) {
+    // Only the failure cases, success is tested in TestDigestingReaderRead below.
+    source := bytes.NewReader([]byte("abc"))
+    for _, input := range []string{
+        "abc",             // Not algo:hexvalue
+        "crc32:",          // Unknown algorithm, empty value
+        "crc32:012345678", // Unknown algorithm
+        "sha256:",         // Empty value
+        "sha256:0",        // Invalid hex value
+        "sha256:01",       // Invalid length of hex value
+    } {
+        validationFailed := false
+        _, err := newDigestingReader(source, input, &validationFailed)
+        assert.Error(t, err, input)
+    }
+}
+
+func TestDigestingReaderRead(t *testing.T) {
+    cases := []struct {
+        input  []byte
+        digest string
+    }{
+        {[]byte(""), "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
+        {[]byte("abc"), "sha256:ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"},
+        {make([]byte, 65537, 65537), "sha256:3266304f31be278d06c3bd3eb9aa3e00c59bedec0a890de466568b0b90b0e01f"},
+    }
+    // Valid input
+    for _, c := range cases {
+        source := bytes.NewReader(c.input)
+        validationFailed := false
+        reader, err := newDigestingReader(source, c.digest, &validationFailed)
+        require.NoError(t, err, c.digest)
+        dest := bytes.Buffer{}
+        n, err := io.Copy(&dest, reader)
+        assert.NoError(t, err, c.digest)
+        assert.Equal(t, int64(len(c.input)), n, c.digest)
+        assert.Equal(t, c.input, dest.Bytes(), c.digest)
+        assert.False(t, validationFailed, c.digest)
+    }
+    // Modified input
+    for _, c := range cases {
+        source := bytes.NewReader(bytes.Join([][]byte{c.input, []byte("x")}, nil))
+        validationFailed := false
+        reader, err := newDigestingReader(source, c.digest, &validationFailed)
+        require.NoError(t, err, c.digest)
+        dest := bytes.Buffer{}
+        _, err = io.Copy(&dest, reader)
+        assert.Error(t, err, c.digest)
+        assert.True(t, validationFailed)
+    }
+}
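One more case, not included in the commit, illustrates the comment in copyHandler about why only a failure indicator is tracked: if the destination stops reading before io.EOF (for example because it detects that the layer already exists), the digest comparison never runs, so the absence of a failure is not proof of success. A hypothetical extra test, assuming it sits next to the tests above in the same package:

// TestDigestingReaderEarlyStop is NOT part of the commit; it only illustrates
// that abandoning the stream early means the digest check never executes.
func TestDigestingReaderEarlyStop(t *testing.T) {
    validationFailed := false
    reader, err := newDigestingReader(
        bytes.NewReader([]byte("abc")),
        "sha256:0000000000000000000000000000000000000000000000000000000000000000", // deliberately wrong
        &validationFailed)
    require.NoError(t, err)
    _, err = io.CopyN(ioutil.Discard, reader, 1) // read one byte, then stop (needs "io/ioutil" imported)
    assert.NoError(t, err)
    assert.False(t, validationFailed) // the mismatch was never detected
}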
vendor/github.com/containers/image/directory/directory_dest.go (generated, vendored) — 29 changed lines

@@ -4,6 +4,7 @@ import (
     "io"
     "io/ioutil"
     "os"
+    "path/filepath"
 
     "github.com/containers/image/types"
 )
@@ -31,18 +32,38 @@ func (d *dirImageDestination) PutManifest(manifest []byte) error {
     return ioutil.WriteFile(d.ref.manifestPath(), manifest, 0644)
 }
 
+// PutBlob writes contents of stream as a blob identified by digest.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+// Note: Calling PutBlob() and other methods may have ordering dependencies WRT other methods of this type. FIXME: Figure out and document.
 func (d *dirImageDestination) PutBlob(digest string, stream io.Reader) error {
-    layerFile, err := os.Create(d.ref.layerPath(digest))
+    blobPath := d.ref.layerPath(digest)
+    blobFile, err := ioutil.TempFile(filepath.Dir(blobPath), filepath.Base(blobPath))
     if err != nil {
         return err
     }
-    defer layerFile.Close()
-    if _, err := io.Copy(layerFile, stream); err != nil {
+    succeeded := false
+    defer func() {
+        blobFile.Close()
+        if !succeeded {
+            os.Remove(blobFile.Name())
+        }
+    }()
+
+    if _, err := io.Copy(blobFile, stream); err != nil {
         return err
     }
-    if err := layerFile.Sync(); err != nil {
+    if err := blobFile.Sync(); err != nil {
         return err
     }
+    if err := blobFile.Chmod(0644); err != nil {
+        return err
+    }
+    if err := os.Rename(blobFile.Name(), blobPath); err != nil {
+        return nil
+    }
+    succeeded = true
     return nil
 }
 
vendor/github.com/containers/image/directory/directory_transport.go
generated
vendored
6
vendor/github.com/containers/image/directory/directory_transport.go
generated
vendored
@ -32,13 +32,17 @@ func (t dirTransport) ParseReference(reference string) (types.ImageReference, er
|
|||||||
// scope passed to this function will not be "", that value is always allowed.
|
// scope passed to this function will not be "", that value is always allowed.
|
||||||
func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
|
func (t dirTransport) ValidatePolicyConfigurationScope(scope string) error {
|
||||||
if !strings.HasPrefix(scope, "/") {
|
if !strings.HasPrefix(scope, "/") {
|
||||||
return fmt.Errorf("Invalid scope %s: must be an absolute path", scope)
|
return fmt.Errorf("Invalid scope %s: Must be an absolute path", scope)
|
||||||
}
|
}
|
||||||
// Refuse also "/", otherwise "/" and "" would have the same semantics,
|
// Refuse also "/", otherwise "/" and "" would have the same semantics,
|
||||||
// and "" could be unexpectedly shadowed by the "/" entry.
|
// and "" could be unexpectedly shadowed by the "/" entry.
|
||||||
if scope == "/" {
|
if scope == "/" {
|
||||||
return errors.New(`Invalid scope "/": Use the generic default scope ""`)
|
return errors.New(`Invalid scope "/": Use the generic default scope ""`)
|
||||||
}
|
}
|
||||||
|
cleaned := filepath.Clean(scope)
|
||||||
|
if cleaned != scope {
|
||||||
|
return fmt.Errorf(`Invalid scope %s: Uses non-canonical format, perhaps try %s`, scope, cleaned)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
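The added check refuses any scope that is not already in the canonical form produced by filepath.Clean, which keeps policy-configuration lookups unambiguous. A quick standalone sketch (not from the commit) of which scopes pass:

package main

import (
    "fmt"
    "path/filepath"
)

func main() {
    for _, scope := range []string{"/var/lib/images", "/var/lib/../lib/images", "/var/lib/images/"} {
        cleaned := filepath.Clean(scope)
        // Only scopes that survive Clean unchanged pass ValidatePolicyConfigurationScope.
        fmt.Printf("%q cleaned=%q ok=%v\n", scope, cleaned, cleaned == scope)
    }
}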
vendor/github.com/containers/image/docker/docker_image_dest.go (generated, vendored) — 5 changed lines

@@ -74,6 +74,11 @@ func (d *dockerImageDestination) PutManifest(m []byte) error {
     return nil
 }
 
+// PutBlob writes contents of stream as a blob identified by digest.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+// Note: Calling PutBlob() and other methods may have ordering dependencies WRT other methods of this type. FIXME: Figure out and document.
 func (d *dockerImageDestination) PutBlob(digest string, stream io.Reader) error {
     checkURL := fmt.Sprintf(blobsURL, d.ref.ref.RemoteName(), digest)
 
vendor/github.com/containers/image/oci/oci_dest.go (generated, vendored) — 27 changed lines

@@ -110,22 +110,41 @@ func (d *ociImageDestination) PutManifest(m []byte) error {
     return ioutil.WriteFile(descriptorPath, data, 0644)
 }
 
+// PutBlob writes contents of stream as a blob identified by digest.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+// Note: Calling PutBlob() and other methods may have ordering dependencies WRT other methods of this type. FIXME: Figure out and document.
 func (d *ociImageDestination) PutBlob(digest string, stream io.Reader) error {
     blobPath := d.ref.blobPath(digest)
     if err := ensureParentDirectoryExists(blobPath); err != nil {
         return err
     }
-    blob, err := os.Create(blobPath)
+    blobFile, err := ioutil.TempFile(filepath.Dir(blobPath), filepath.Base(blobPath))
     if err != nil {
         return err
     }
-    defer blob.Close()
-    if _, err := io.Copy(blob, stream); err != nil {
+    succeeded := false
+    defer func() {
+        blobFile.Close()
+        if !succeeded {
+            os.Remove(blobFile.Name())
+        }
+    }()
+
+    if _, err := io.Copy(blobFile, stream); err != nil {
         return err
     }
-    if err := blob.Sync(); err != nil {
+    if err := blobFile.Sync(); err != nil {
         return err
     }
+    if err := blobFile.Chmod(0644); err != nil {
+        return err
+    }
+    if err := os.Rename(blobFile.Name(), blobPath); err != nil {
+        return nil
+    }
+    succeeded = true
     return nil
 }
 
vendor/github.com/containers/image/oci/oci_transport.go (generated, vendored) — 4 changed lines

@@ -58,6 +58,10 @@ func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error {
     if scope == "/" {
         return errors.New(`Invalid scope "/": Use the generic default scope ""`)
     }
+    cleaned := filepath.Clean(dir)
+    if cleaned != dir {
+        return fmt.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned)
+    }
     return nil
 }
 
vendor/github.com/containers/image/openshift/openshift.go (generated, vendored) — 5 changed lines

@@ -368,6 +368,11 @@ func (d *openshiftImageDestination) PutManifest(m []byte) error {
     return nil
 }
 
+// PutBlob writes contents of stream as a blob identified by digest.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+// Note: Calling PutBlob() and other methods may have ordering dependencies WRT other methods of this type. FIXME: Figure out and document.
 func (d *openshiftImageDestination) PutBlob(digest string, stream io.Reader) error {
     return d.docker.PutBlob(digest, stream)
 }
vendor/github.com/containers/image/types/types.go (generated, vendored) — 4 changed lines

@@ -104,6 +104,10 @@ type ImageDestination interface {
     Reference() ImageReference
     // FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
     PutManifest([]byte) error
+    // PutBlob writes contents of stream as a blob identified by digest.
+    // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+    // to any other readers for download using the supplied digest.
+    // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
     // Note: Calling PutBlob() and other methods may have ordering dependencies WRT other methods of this type. FIXME: Figure out and document.
     PutBlob(digest string, stream io.Reader) error
     PutSignatures(signatures [][]byte) error
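The same requirements are now spelled out on the interface itself. Purely as an illustration of the MUST clauses, and not a type that exists in containers/image, a destination that buffers the whole stream in memory satisfies them trivially, since nothing is committed until the stream (and therefore any on-the-fly digest verification) has completed:

package sketch

import (
    "bytes"
    "io"
)

// memoryDestination is a hypothetical, minimal blob holder used only to
// illustrate the PutBlob contract quoted above.
type memoryDestination struct {
    blobs map[string][]byte
}

// PutBlob reads the whole stream first and commits it only if reading
// succeeded, so an error from the stream (such as a digest mismatch reported
// at io.EOF time) leaves no partial data behind.
func (d *memoryDestination) PutBlob(digest string, stream io.Reader) error {
    buf := bytes.Buffer{}
    if _, err := io.Copy(&buf, stream); err != nil {
        return err
    }
    if d.blobs == nil {
        d.blobs = map[string][]byte{}
    }
    d.blobs[digest] = buf.Bytes()
    return nil
}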