commit d69c51e958
Merge pull request #248 from Crazykev/use-docker-digest

Use docker/distribution/digest

Antonio Murdaca, 2016-11-28 17:31:33 +01:00 (committed by GitHub)
46 changed files with 566 additions and 256 deletions

View File

@@ -7,6 +7,7 @@ import (
 "github.com/containers/image/docker"
 "github.com/containers/image/manifest"
+"github.com/docker/distribution/digest"
 "github.com/urfave/cli"
 )
@@ -14,7 +15,7 @@ import (
 type inspectOutput struct {
 Name string `json:",omitempty"`
 Tag string `json:",omitempty"`
-Digest string
+Digest digest.Digest
 RepoTags []string
 Created time.Time
 DockerVersion string
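
Note (illustrative sketch, not part of the commit): digest.Digest is a typed string, so swapping it into the Digest field keeps skopeo inspect's JSON output unchanged. A minimal stand-in struct shows the marshaling behavior; the digest value is the fixture digest used elsewhere in this diff.

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/docker/distribution/digest"
    )

    func main() {
        out := struct {
            Name   string
            Digest digest.Digest // a typed string; marshals exactly like string
        }{
            Name:   "example",
            Digest: digest.Digest("sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55"),
        }
        b, _ := json.Marshal(out)
        fmt.Println(string(b))
    }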

View File

@@ -11,6 +11,7 @@ import (
 "github.com/containers/image/image"
 "github.com/containers/image/manifest"
 "github.com/containers/image/types"
+"github.com/docker/distribution/digest"
 "github.com/urfave/cli"
 )
@@ -39,10 +40,21 @@ var layersCmd = cli.Command{
 }
 defer src.Close()
-blobDigests := c.Args().Tail()
+var blobDigests []digest.Digest
+for _, dString := range c.Args().Tail() {
+if !strings.HasPrefix(dString, "sha256:") {
+dString = "sha256:" + dString
+}
+d, err := digest.ParseDigest(dString)
+if err != nil {
+return err
+}
+blobDigests = append(blobDigests, d)
+}
 if len(blobDigests) == 0 {
 layers := src.LayerInfos()
-seenLayers := map[string]struct{}{}
+seenLayers := map[digest.Digest]struct{}{}
 for _, info := range layers {
 if _, ok := seenLayers[info.Digest]; !ok {
 blobDigests = append(blobDigests, info.Digest)
@@ -70,9 +82,6 @@ var layersCmd = cli.Command{
 defer dest.Close()
 for _, digest := range blobDigests {
-if !strings.HasPrefix(digest, "sha256:") {
-digest = "sha256:" + digest
-}
 r, blobSize, err := rawSource.GetBlob(digest)
 if err != nil {
 return err
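
Note (illustrative sketch, not part of the commit): the new loop validates user-supplied digests up front with digest.ParseDigest instead of patching the "sha256:" prefix at GetBlob time. A self-contained version of the same pattern, using a hypothetical parseBlobDigests helper:

    package main

    import (
        "fmt"
        "strings"

        "github.com/docker/distribution/digest"
    )

    // parseBlobDigests normalizes bare hex arguments to "sha256:<hex>" and
    // validates them, mirroring the loop added in the hunk above.
    func parseBlobDigests(args []string) ([]digest.Digest, error) {
        var blobDigests []digest.Digest
        for _, dString := range args {
            if !strings.HasPrefix(dString, "sha256:") {
                dString = "sha256:" + dString
            }
            d, err := digest.ParseDigest(dString) // rejects malformed values early
            if err != nil {
                return nil, err
            }
            blobDigests = append(blobDigests, d)
        }
        return blobDigests, nil
    }

    func main() {
        ds, err := parseBlobDigests([]string{"20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55"})
        fmt.Println(ds, err)
    }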

View File

@@ -27,5 +27,5 @@ func TestManifestDigest(t *testing.T) {
 // Success
 out, err = runSkopeo("manifest-digest", "fixtures/image.manifest.json")
 assert.NoError(t, err)
-assert.Equal(t, fixturesTestImageManifestDigest+"\n", out)
+assert.Equal(t, fixturesTestImageManifestDigest.String()+"\n", out)
 }

View File

@@ -6,13 +6,14 @@ import (
 "testing"
 "github.com/containers/image/signature"
+"github.com/docker/distribution/digest"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
 )
 const (
 // fixturesTestImageManifestDigest is the Docker manifest digest of "image.manifest.json"
-fixturesTestImageManifestDigest = "sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55"
+fixturesTestImageManifestDigest = digest.Digest("sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55")
 // fixturesTestKeyFingerprint is the fingerprint of the private key.
 fixturesTestKeyFingerprint = "1D8230F6CDB6A06716E414C1DB72F2188BB46CC8"
 )
@@ -122,5 +123,5 @@ func TestStandaloneVerify(t *testing.T) {
 out, err = runSkopeo("standalone-verify", manifestPath,
 dockerReference, fixturesTestKeyFingerprint, signaturePath)
 assert.NoError(t, err)
-assert.Equal(t, "Signature verified, digest "+fixturesTestImageManifestDigest+"\n", out)
+assert.Equal(t, "Signature verified, digest "+fixturesTestImageManifestDigest.String()+"\n", out)
 }

View File

@@ -11,6 +11,7 @@ import (
 "strings"
 "github.com/containers/image/manifest"
+"github.com/docker/distribution/digest"
 "github.com/go-check/check"
 )
@@ -167,7 +168,7 @@ func (s *CopySuite) TestCopyStreaming(c *check.C) {
 assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "atomic:localhost:5000/myns/unsigned:streaming", "dir:"+dir2)
 // The manifests will have different JWS signatures; so, compare the manifests by digests, which
 // strips the signatures, and remove them, comparing the rest file by file.
-digests := []string{}
+digests := []digest.Digest{}
 for _, dir := range []string{dir1, dir2} {
 manifestPath := filepath.Join(dir, "manifest.json")
 m, err := ioutil.ReadFile(manifestPath)

View File

@@ -3,16 +3,11 @@ package copy
 import (
 "bytes"
 "compress/gzip"
-"crypto/sha256"
-"crypto/subtle"
-"encoding/hex"
 "errors"
 "fmt"
-"hash"
 "io"
 "io/ioutil"
 "reflect"
-"strings"
 pb "gopkg.in/cheggaaa/pb.v1"
@@ -22,6 +17,7 @@ import (
 "github.com/containers/image/signature"
 "github.com/containers/image/transports"
 "github.com/containers/image/types"
+"github.com/docker/distribution/digest"
 )
 // preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
@@ -29,40 +25,26 @@ import (
 // Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used.
 var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType}
-// supportedDigests lists the supported blob digest types.
-var supportedDigests = map[string]func() hash.Hash{
-"sha256": sha256.New,
-}
 type digestingReader struct {
 source io.Reader
-digest hash.Hash
-expectedDigest []byte
+digester digest.Digester
+expectedDigest digest.Digest
 validationFailed bool
 }
 // newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
-// and set validationFailed to true if the source stream does not match expectedDigestString.
-func newDigestingReader(source io.Reader, expectedDigestString string) (*digestingReader, error) {
-fields := strings.SplitN(expectedDigestString, ":", 2)
-if len(fields) != 2 {
-return nil, fmt.Errorf("Invalid digest specification %s", expectedDigestString)
-}
-fn, ok := supportedDigests[fields[0]]
-if !ok {
-return nil, fmt.Errorf("Invalid digest specification %s: unknown digest type %s", expectedDigestString, fields[0])
-}
-digest := fn()
-expectedDigest, err := hex.DecodeString(fields[1])
-if err != nil {
-return nil, fmt.Errorf("Invalid digest value %s: %v", expectedDigestString, err)
-}
-if len(expectedDigest) != digest.Size() {
-return nil, fmt.Errorf("Invalid digest specification %s: length %d does not match %d", expectedDigestString, len(expectedDigest), digest.Size())
-}
+// and set validationFailed to true if the source stream does not match expectedDigest.
+func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
+if err := expectedDigest.Validate(); err != nil {
+return nil, fmt.Errorf("Invalid digest specification %s", expectedDigest)
+}
+digestAlgorithm := expectedDigest.Algorithm()
+if !digestAlgorithm.Available() {
+return nil, fmt.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
+}
 return &digestingReader{
 source: source,
-digest: digest,
+digester: digestAlgorithm.New(),
 expectedDigest: expectedDigest,
 validationFailed: false,
 }, nil
@@ -71,7 +53,7 @@ func newDigestingReader(source io.Reader, expectedDigestString string) (*digesti
 func (d *digestingReader) Read(p []byte) (int, error) {
 n, err := d.source.Read(p)
 if n > 0 {
-if n2, err := d.digest.Write(p[:n]); n2 != n || err != nil {
+if n2, err := d.digester.Hash().Write(p[:n]); n2 != n || err != nil {
 // Coverage: This should not happen, the hash.Hash interface requires
 // d.digest.Write to never return an error, and the io.Writer interface
 // requires n2 == len(input) if no error is returned.
@@ -79,10 +61,10 @@ func (d *digestingReader) Read(p []byte) (int, error) {
 }
 }
 if err == io.EOF {
-actualDigest := d.digest.Sum(nil)
-if subtle.ConstantTimeCompare(actualDigest, d.expectedDigest) != 1 {
+actualDigest := d.digester.Digest()
+if actualDigest != d.expectedDigest {
 d.validationFailed = true
-return 0, fmt.Errorf("Digest did not match, expected %s, got %s", hex.EncodeToString(d.expectedDigest), hex.EncodeToString(actualDigest))
+return 0, fmt.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
 }
 }
 return n, err
@@ -236,7 +218,7 @@ func copyLayers(manifestUpdates *types.ManifestUpdateOptions, dest types.ImageDe
 srcInfos := src.LayerInfos()
 destInfos := []types.BlobInfo{}
 diffIDs := []string{}
-copiedLayers := map[string]copiedLayer{}
+copiedLayers := map[digest.Digest]copiedLayer{}
 for _, srcLayer := range srcInfos {
 cl, ok := copiedLayers[srcLayer.Digest]
 if !ok {
@@ -245,7 +227,7 @@ func copyLayers(manifestUpdates *types.ManifestUpdateOptions, dest types.ImageDe
 if err != nil {
 return err
 }
-cl = copiedLayer{blobInfo: destInfo, diffID: diffID}
+cl = copiedLayer{blobInfo: destInfo, diffID: diffID.String()}
 copiedLayers[srcLayer.Digest] = cl
 }
 destInfos = append(destInfos, cl.blobInfo)
@@ -297,14 +279,14 @@ func copyConfig(dest types.ImageDestination, src types.Image, reportWriter io.Wr
 // diffIDResult contains both a digest value and an error from diffIDComputationGoroutine.
 // We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation.
 type diffIDResult struct {
-digest string
+digest digest.Digest
 err error
 }
 // copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
 // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
 func copyLayer(dest types.ImageDestination, src types.ImageSource, srcInfo types.BlobInfo,
-diffIDIsNeeded bool, canCompress bool, reportWriter io.Writer) (types.BlobInfo, string, error) {
+diffIDIsNeeded bool, canCompress bool, reportWriter io.Writer) (types.BlobInfo, digest.Digest, error) {
 srcStream, srcBlobSize, err := src.GetBlob(srcInfo.Digest) // We currently completely ignore srcInfo.Size throughout.
 if err != nil {
 return types.BlobInfo{}, "", fmt.Errorf("Error reading blob %s: %v", srcInfo.Digest, err)
@@ -375,7 +357,7 @@ func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadClo
 }
 // computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
-func computeDiffID(stream io.Reader, decompressor decompressorFunc) (string, error) {
+func computeDiffID(stream io.Reader, decompressor decompressorFunc) (digest.Digest, error) {
 if decompressor != nil {
 s, err := decompressor(stream)
 if err != nil {
@@ -384,13 +366,7 @@ func computeDiffID(stream io.Reader, decompressor decompressorFunc) (string, err
 stream = s
 }
-h := sha256.New()
-_, err := io.Copy(h, stream)
-if err != nil {
-return "", err
-}
-hash := h.Sum(nil)
-return "sha256:" + hex.EncodeToString(hash[:]), nil
+return digest.Canonical.FromReader(stream)
 }
 // copyBlobFromStream copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest,
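
Note (illustrative sketch, not part of the commit): the hand-rolled sha256/hex/subtle code above is replaced by the docker/distribution/digest API (Validate, Algorithm, Digester). A condensed version of the same validation steps, with a hypothetical verifyStream helper standing in for digestingReader:

    package main

    import (
        "fmt"
        "io"
        "strings"

        "github.com/docker/distribution/digest"
    )

    // verifyStream hashes r with the algorithm named in expected and compares
    // the result, the same steps digestingReader performs incrementally.
    func verifyStream(r io.Reader, expected digest.Digest) error {
        if err := expected.Validate(); err != nil {
            return fmt.Errorf("invalid digest %q: %v", expected, err)
        }
        algorithm := expected.Algorithm()
        if !algorithm.Available() {
            return fmt.Errorf("unsupported digest algorithm %s", algorithm)
        }
        digester := algorithm.New() // wraps the matching hash.Hash
        if _, err := io.Copy(digester.Hash(), r); err != nil {
            return err
        }
        if actual := digester.Digest(); actual != expected {
            return fmt.Errorf("digest mismatch: expected %s, got %s", expected, actual)
        }
        return nil
    }

    func main() {
        data := "some blob contents"
        expected := digest.FromBytes([]byte(data))
        fmt.Println(verifyStream(strings.NewReader(data), expected)) // <nil>
    }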

View File

@@ -1,14 +1,13 @@
 package directory
 import (
-"crypto/sha256"
-"encoding/hex"
 "fmt"
 "io"
 "io/ioutil"
 "os"
 "github.com/containers/image/types"
+"github.com/docker/distribution/digest"
 )
 type dirImageDestination struct {
@@ -64,14 +63,14 @@ func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
 }
 }()
-h := sha256.New()
-tee := io.TeeReader(stream, h)
+digester := digest.Canonical.New()
+tee := io.TeeReader(stream, digester.Hash())
 size, err := io.Copy(blobFile, tee)
 if err != nil {
 return types.BlobInfo{}, err
 }
-computedDigest := hex.EncodeToString(h.Sum(nil))
+computedDigest := digester.Digest()
 if inputInfo.Size != -1 && size != inputInfo.Size {
 return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
 }
@@ -86,7 +85,7 @@ func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
 return types.BlobInfo{}, err
 }
 succeeded = true
-return types.BlobInfo{Digest: "sha256:" + computedDigest, Size: size}, nil
+return types.BlobInfo{Digest: computedDigest, Size: size}, nil
 }
 func (d *dirImageDestination) PutManifest(manifest []byte) error {
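
Note (illustrative sketch, not part of the commit): PutBlob now digests while it copies by teeing the stream into the digester's hash, so the blob is written and hashed in a single pass. A condensed version, with a bytes.Buffer standing in for the blob file on disk:

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "strings"

        "github.com/docker/distribution/digest"
    )

    func main() {
        stream := strings.NewReader("layer contents")
        var blobFile bytes.Buffer // stands in for the destination blob file

        digester := digest.Canonical.New() // sha256
        tee := io.TeeReader(stream, digester.Hash())
        size, err := io.Copy(&blobFile, tee) // writes and hashes in one pass
        if err != nil {
            panic(err)
        }
        fmt.Println(size, digester.Digest())
    }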

View File

@@ -6,7 +6,9 @@ import (
 "io/ioutil"
 "os"
+"github.com/containers/image/manifest"
 "github.com/containers/image/types"
+"github.com/docker/distribution/digest"
 )
 type dirImageSource struct {
@@ -29,21 +31,22 @@ func (s *dirImageSource) Reference() types.ImageReference {
 func (s *dirImageSource) Close() {
 }
-// it's up to the caller to determine the MIME type of the returned manifest's bytes
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
 func (s *dirImageSource) GetManifest() ([]byte, string, error) {
 m, err := ioutil.ReadFile(s.ref.manifestPath())
 if err != nil {
 return nil, "", err
 }
-return m, "", err
+return m, manifest.GuessMIMEType(m), err
 }
-func (s *dirImageSource) GetTargetManifest(digest string) ([]byte, string, error) {
-return nil, "", fmt.Errorf("Getting target manifest not supported by dir:")
+func (s *dirImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
+return nil, "", fmt.Errorf(`Getting target manifest not supported by "dir:"`)
 }
 // GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
-func (s *dirImageSource) GetBlob(digest string) (io.ReadCloser, int64, error) {
+func (s *dirImageSource) GetBlob(digest digest.Digest) (io.ReadCloser, int64, error) {
 r, err := os.Open(s.ref.layerPath(digest))
 if err != nil {
 return nil, 0, nil

View File

@@ -10,6 +10,7 @@ import (
 "github.com/containers/image/docker/reference"
 "github.com/containers/image/image"
 "github.com/containers/image/types"
+"github.com/docker/distribution/digest"
 )
 // Transport is an ImageTransport for directory paths.
@@ -161,9 +162,9 @@ func (ref dirReference) manifestPath() string {
 }
 // layerPath returns a path for a layer tarball within a directory using our conventions.
-func (ref dirReference) layerPath(digest string) string {
+func (ref dirReference) layerPath(digest digest.Digest) string {
 // FIXME: Should we keep the digest identification?
-return filepath.Join(ref.path, strings.TrimPrefix(digest, "sha256:")+".tar")
+return filepath.Join(ref.path, digest.Hex()+".tar")
 }
 // signaturePath returns a path for a signature within a directory using our conventions.
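
Note (illustrative sketch, not part of the commit): a typed digest carries its algorithm, so layerPath can use Hex() rather than trimming a hard-coded "sha256:" prefix. A small example with a hypothetical directory path:

    package main

    import (
        "fmt"
        "path/filepath"

        "github.com/docker/distribution/digest"
    )

    func main() {
        d := digest.Digest("sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55")
        fmt.Println(d.Algorithm()) // sha256
        fmt.Println(d.Hex())       // the hex part, without the "sha256:" prefix
        // A layer path built the way layerPath does it, under a hypothetical directory:
        fmt.Println(filepath.Join("/tmp/dir-image", d.Hex()+".tar"))
    }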

View File

@@ -3,8 +3,6 @@ package daemon
 import (
 "archive/tar"
 "bytes"
-"crypto/sha256"
-"encoding/hex"
 "encoding/json"
 "errors"
 "fmt"
@@ -14,14 +12,17 @@ import (
 "time"
 "github.com/Sirupsen/logrus"
+"github.com/containers/image/docker/reference"
 "github.com/containers/image/manifest"
 "github.com/containers/image/types"
+"github.com/docker/distribution/digest"
 "github.com/docker/engine-api/client"
 "golang.org/x/net/context"
 )
 type daemonImageDestination struct {
 ref daemonReference
+namedTaggedRef reference.NamedTagged // Strictly speaking redundant with ref above; having the field makes it structurally impossible for later users to fail.
 // For talking to imageLoadGoroutine
 goroutineCancel context.CancelFunc
 statusChannel <-chan error
@@ -33,7 +34,14 @@ type daemonImageDestination struct {
 // newImageDestination returns a types.ImageDestination for the specified image reference.
 func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) {
-// FIXME: Do something with ref
+if ref.ref == nil {
+return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
+}
+namedTaggedRef, ok := ref.ref.(reference.NamedTagged)
+if !ok {
+return nil, fmt.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport())
+}
 c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host
 if err != nil {
 return nil, fmt.Errorf("Error initializing docker engine client: %v", err)
@@ -48,6 +56,7 @@ func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (t
 return &daemonImageDestination{
 ref: ref,
+namedTaggedRef: namedTaggedRef,
 goroutineCancel: goroutineCancel,
 statusChannel: statusChannel,
 writer: writer,
@@ -105,7 +114,7 @@ func (d *daemonImageDestination) Reference() types.ImageReference {
 // If an empty slice or nil it's returned, then any mime type can be tried to upload
 func (d *daemonImageDestination) SupportedManifestMIMETypes() []string {
 return []string{
-manifest.DockerV2Schema2MediaType, // FIXME: Handle others.
+manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
 }
 }
@@ -127,8 +136,8 @@ func (d *daemonImageDestination) ShouldCompressLayers() bool {
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
 func (d *daemonImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
-if inputInfo.Digest == "" {
-return types.BlobInfo{}, fmt.Errorf("Can not stream a blob with unknown digest to docker-daemon:")
+if inputInfo.Digest.String() == "" {
+return types.BlobInfo{}, fmt.Errorf(`"Can not stream a blob with unknown digest to "docker-daemon:"`)
 }
 if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size.
@@ -153,12 +162,12 @@ func (d *daemonImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
 logrus.Debugf("… streaming done")
 }
-hash := sha256.New()
-tee := io.TeeReader(stream, hash)
-if err := d.sendFile(inputInfo.Digest, inputInfo.Size, tee); err != nil {
+digester := digest.Canonical.New()
+tee := io.TeeReader(stream, digester.Hash())
+if err := d.sendFile(inputInfo.Digest.String(), inputInfo.Size, tee); err != nil {
 return types.BlobInfo{}, err
 }
-return types.BlobInfo{Digest: "sha256:" + hex.EncodeToString(hash.Sum(nil)), Size: inputInfo.Size}, nil
+return types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}, nil
 }
 func (d *daemonImageDestination) PutManifest(m []byte) error {
@@ -167,17 +176,35 @@ func (d *daemonImageDestination) PutManifest(m []byte) error {
 return fmt.Errorf("Error parsing manifest: %v", err)
 }
 if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
-// FIXME FIXME: Teach copy.go about this.
 return fmt.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
 }
 layerPaths := []string{}
 for _, l := range man.Layers {
-layerPaths = append(layerPaths, l.Digest)
+layerPaths = append(layerPaths, l.Digest.String())
 }
+// For github.com/docker/docker consumers, this works just as well as
+// refString := d.namedTaggedRef.String() [i.e. d.ref.ref.String()]
+// because when reading the RepoTags strings, github.com/docker/docker/reference
+// normalizes both of them to the same value.
+//
+// Doing it this way to include the normalized-out `docker.io[/library]` does make
+// a difference for github.com/projectatomic/docker consumers, with the
+// “Add --add-registry and --block-registry options to docker daemon” patch.
+// These consumers treat reference strings which include a hostname and reference
+// strings without a hostname differently.
+//
+// Using the host name here is more explicit about the intent, and it has the same
+// effect as (docker pull) in projectatomic/docker, which tags the result using
+// a hostname-qualified reference.
+// See https://github.com/containers/image/issues/72 for a more detailed
+// analysis and explanation.
+refString := fmt.Sprintf("%s:%s", d.namedTaggedRef.FullName(), d.namedTaggedRef.Tag())
 items := []manifestItem{{
-Config: man.Config.Digest,
-RepoTags: []string{string(d.ref)}, // FIXME: Only if ref is a NamedTagged
+Config: man.Config.Digest.String(),
+RepoTags: []string{refString},
 Layers: layerPaths,
 Parent: "",
 LayerSources: nil,

View File

@@ -3,8 +3,6 @@ package daemon
 import (
 "archive/tar"
 "bytes"
-"crypto/sha256"
-"encoding/hex"
 "encoding/json"
 "fmt"
 "io"
@@ -14,6 +12,7 @@ import (
 "github.com/containers/image/manifest"
 "github.com/containers/image/types"
+"github.com/docker/distribution/digest"
 "github.com/docker/engine-api/client"
 "golang.org/x/net/context"
 )
@@ -26,7 +25,7 @@ type daemonImageSource struct {
 // The following data is only available after ensureCachedDataIsPresent() succeeds
 tarManifest *manifestItem // nil if not available yet.
 configBytes []byte
-configDigest string
+configDigest digest.Digest
 orderedDiffIDList []diffID
 knownLayers map[diffID]*layerInfo
 // Other state
@@ -52,7 +51,9 @@ func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageS
 if err != nil {
 return nil, fmt.Errorf("Error initializing docker engine client: %v", err)
 }
-inputStream, err := c.ImageSave(context.TODO(), []string{string(ref)}) // FIXME: ref should be per docker/reference.ParseIDOrReference, and we don't want NameOnly
+// Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
+// Either way ImageSave should create a tarball with exactly one image.
+inputStream, err := c.ImageSave(context.TODO(), []string{ref.StringWithinTransport()})
 if err != nil {
 return nil, fmt.Errorf("Error loading image from docker engine: %v", err)
 }
@@ -200,7 +201,7 @@ func (s *daemonImageSource) ensureCachedDataIsPresent() error {
 if err != nil {
 return err
 }
-var parsedConfig image // Most fields ommitted, we only care about layer DiffIDs.
+var parsedConfig dockerImage // Most fields ommitted, we only care about layer DiffIDs.
 if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
 return fmt.Errorf("Error decoding tar config %s: %v", tarManifest.Config, err)
 }
@@ -211,10 +212,9 @@ func (s *daemonImageSource) ensureCachedDataIsPresent() error {
 }
 // Success; commit.
-configHash := sha256.Sum256(configBytes)
 s.tarManifest = tarManifest
 s.configBytes = configBytes
-s.configDigest = "sha256:" + hex.EncodeToString(configHash[:])
+s.configDigest = digest.FromBytes(configBytes)
 s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
 s.knownLayers = knownLayers
 return nil
@@ -237,7 +237,7 @@ func (s *daemonImageSource) loadTarManifest() (*manifestItem, error) {
 return &items[0], nil
 }
-func (s *daemonImageSource) prepareLayerData(tarManifest *manifestItem, parsedConfig *image) (map[diffID]*layerInfo, error) {
+func (s *daemonImageSource) prepareLayerData(tarManifest *manifestItem, parsedConfig *dockerImage) (map[diffID]*layerInfo, error) {
 // Collect layer data available in manifest and config.
 if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
 return nil, fmt.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
@@ -290,7 +290,7 @@ func (s *daemonImageSource) prepareLayerData(tarManifest *manifestItem, parsedCo
 return knownLayers, nil
 }
-// GetManifest returns the image's manifest along with its MIME type. The empty string is returned if the MIME type is unknown.
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
 // It may use a remote (= slow) service.
 func (s *daemonImageSource) GetManifest() ([]byte, string, error) {
 if s.generatedManifest == nil {
@@ -313,7 +313,7 @@ func (s *daemonImageSource) GetManifest() ([]byte, string, error) {
 return nil, "", fmt.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
 }
 m.Layers = append(m.Layers, distributionDescriptor{
-Digest: string(diffID), // diffID is a digest of the uncompressed tarball
+Digest: digest.Digest(diffID), // diffID is a digest of the uncompressed tarball
 MediaType: manifest.DockerV2Schema2LayerMediaType,
 Size: li.size,
 })
@@ -329,13 +329,13 @@ func (s *daemonImageSource) GetManifest() ([]byte, string, error) {
 // GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest
 // out of a manifest list.
-func (s *daemonImageSource) GetTargetManifest(digest string) ([]byte, string, error) {
+func (s *daemonImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
 // How did we even get here? GetManifest() above has returned a manifest.DockerV2Schema2MediaType.
-return nil, "", fmt.Errorf("Manifests list are not supported by docker-daemon:")
+return nil, "", fmt.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
 }
 // GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
-func (s *daemonImageSource) GetBlob(digest string) (io.ReadCloser, int64, error) {
+func (s *daemonImageSource) GetBlob(digest digest.Digest) (io.ReadCloser, int64, error) {
 if err := s.ensureCachedDataIsPresent(); err != nil {
 return nil, 0, err
 }
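
Note (illustrative sketch, not part of the commit): digest.FromBytes replaces the manual sha256.Sum256 + hex.EncodeToString steps when deriving the config digest (the image ID) from the config blob. A minimal example with a stand-in config:

    package main

    import (
        "fmt"

        "github.com/docker/distribution/digest"
    )

    func main() {
        configBytes := []byte(`{"rootfs":{"type":"layers"}}`) // stand-in config JSON
        configDigest := digest.FromBytes(configBytes)         // "sha256:…", the image ID
        fmt.Println(configDigest)
        fmt.Println(configDigest.Algorithm()) // sha256
    }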

View File

@@ -1,10 +1,13 @@
 package daemon
 import (
+"errors"
 "fmt"
 "github.com/containers/image/docker/reference"
+"github.com/containers/image/image"
 "github.com/containers/image/types"
+"github.com/docker/distribution/digest"
 )
 // Transport is an ImageTransport for images managed by a local Docker daemon.
@@ -27,19 +30,69 @@ func (t daemonTransport) ParseReference(reference string) (types.ImageReference,
 // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
 // scope passed to this function will not be "", that value is always allowed.
 func (t daemonTransport) ValidatePolicyConfigurationScope(scope string) error {
-// FIXME FIXME
-return nil
+// See the explanation in daemonReference.PolicyConfigurationIdentity.
+return errors.New(`docker-daemon: does not support any scopes except the default "" one`)
 }
-// daemonReference is an ImageReference for images managed by a local Docker daemon.
-type daemonReference string // FIXME FIXME
+// daemonReference is an ImageReference for images managed by a local Docker daemon
+// Exactly one of id and ref can be set.
+// For daemonImageSource, both id and ref are acceptable, ref must not be a NameOnly (interpreted as all tags in that repository by the daemon)
+// For daemonImageDestination, it must be a ref, which is NamedTagged.
+// (We could, in principle, also allow storing images without tagging them, and the user would have to refer to them using the docker image ID = config digest.
+// Using the config digest requires the caller to parse the manifest themselves, which is very cumbersome; so, for now, we dont bother.)
+type daemonReference struct {
+id digest.Digest
+ref reference.Named // !reference.IsNameOnly
+}
 // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
-func ParseReference(reference string) (types.ImageReference, error) {
-return daemonReference(reference), nil // FIXME FIXME
+func ParseReference(refString string) (types.ImageReference, error) {
+// This is intended to be compatible with reference.ParseIDOrReference, but more strict about refusing some of the ambiguous cases.
+// In particular, this rejects unprefixed digest values (64 hex chars), and sha256 digest prefixes (sha256:fewer-than-64-hex-chars).
+// digest:hexstring is structurally the same as a reponame:tag (meaning docker.io/library/reponame:tag).
+// reference.ParseIDOrReference interprets such strings as digests.
+if dgst, err := digest.ParseDigest(refString); err == nil {
+// The daemon explicitly refuses to tag images with a reponame equal to digest.Canonical - but _only_ this digest name.
+// Other digest references are ambiguous, so refuse them.
+if dgst.Algorithm() != digest.Canonical {
+return nil, fmt.Errorf("Invalid docker-daemon: reference %s: only digest algorithm %s accepted", refString, digest.Canonical)
+}
+return NewReference(dgst, nil)
+}
+ref, err := reference.ParseNamed(refString) // This also rejects unprefixed digest values
+if err != nil {
+return nil, err
+}
+if ref.Name() == digest.Canonical.String() {
+return nil, fmt.Errorf("Invalid docker-daemon: reference %s: The %s repository name is reserved for (non-shortened) digest references", refString, digest.Canonical)
+}
+return NewReference("", ref)
 }
-// FIXME FIXME: NewReference?
+// NewReference returns a docker-daemon reference for either the supplied image ID (config digest) or the supplied reference (which must satisfy !reference.IsNameOnly)
+func NewReference(id digest.Digest, ref reference.Named) (types.ImageReference, error) {
+if id != "" && ref != nil {
+return nil, errors.New("docker-daemon: reference must not have an image ID and a reference string specified at the same time")
+}
+if ref != nil {
+if reference.IsNameOnly(ref) {
+return nil, fmt.Errorf("docker-daemon: reference %s has neither a tag nor a digest", ref.String())
+}
+// A github.com/distribution/reference value can have a tag and a digest at the same time!
+// docker/reference does not handle that, so fail.
+_, isTagged := ref.(reference.NamedTagged)
+_, isDigested := ref.(reference.Canonical)
+if isTagged && isDigested {
+return nil, fmt.Errorf("docker-daemon: references with both a tag and digest are currently not supported")
+}
+}
+return daemonReference{
+id: id,
+ref: ref,
+}, nil
+}
 func (ref daemonReference) Transport() types.ImageTransport {
 return Transport
@@ -52,14 +105,21 @@ func (ref daemonReference) Transport() types.ImageTransport {
 // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
 // instead, see transports.ImageName().
 func (ref daemonReference) StringWithinTransport() string {
-return string(ref) // FIXME FIXME
+switch {
+case ref.id != "":
+return ref.id.String()
+case ref.ref != nil:
+return ref.ref.String()
+default: // Coverage: Should never happen, NewReference above should refuse such values.
+panic("Internal inconsistency: daemonReference has empty id and nil ref")
+}
 }
 // DockerReference returns a Docker reference associated with this reference
 // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
 // not e.g. after redirect or alias processing), or nil if unknown/not applicable.
 func (ref daemonReference) DockerReference() reference.Named {
-return nil // FIXME FIXME
+return ref.ref // May be nil
 }
 // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
@@ -70,7 +130,10 @@ func (ref daemonReference) DockerReference() reference.Named {
 // not required/guaranteed that it will be a valid input to Transport().ParseReference().
 // Returns "" if configuration identities for these references are not supported.
 func (ref daemonReference) PolicyConfigurationIdentity() string {
-return string(ref) // FIXME FIXME
+// We must allow referring to images in the daemon by image ID, otherwise untagged images would not be accessible.
+// But the existence of image IDs means that we cant truly well namespace the input; the untagged images would have to fall into the default policy,
+// which can be unexpected. So, punt.
+return "" // This still allows using the default "" scope to define a policy for this transport.
 }
 // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
@@ -79,13 +142,18 @@ func (ref daemonReference) PolicyConfigurationIdentity() string {
 // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
 // and each following element to be a prefix of the element preceding it.
 func (ref daemonReference) PolicyConfigurationNamespaces() []string {
-return []string{} // FIXME FIXME?
+// See the explanation in daemonReference.PolicyConfigurationIdentity.
+return []string{}
 }
 // NewImage returns a types.Image for this reference.
 // The caller must call .Close() on the returned Image.
 func (ref daemonReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
-panic("FIXME FIXME")
+src, err := newImageSource(ctx, ref)
+if err != nil {
+return nil, err
+}
+return image.FromSource(src)
 }
 // NewImageSource returns a types.ImageSource for this reference,
@@ -104,5 +172,8 @@ func (ref daemonReference) NewImageDestination(ctx *types.SystemContext) (types.
 // DeleteImage deletes the named image from the registry, if supported.
 func (ref daemonReference) DeleteImage(ctx *types.SystemContext) error {
-return fmt.Errorf("Deleting images not implemented for docker-daemon: images") // FIXME FIXME?
+// Should this just untag the image? Should this stop running containers?
+// The semantics is not quite as clear as for remote repositories.
+// The user can run (docker rmi) directly anyway, so, for now(?), punt instead of trying to guess what the user meant.
+return fmt.Errorf("Deleting images not implemented for docker-daemon: images")
 }
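
Note (illustrative sketch, not part of the commit): the new ParseReference treats a full sha256 digest as an image ID and everything else as a named reference that must carry a tag or digest. A rough sketch of that disambiguation, using a hypothetical classify helper; exact behavior depends on the vendored reference package.

    package main

    import (
        "fmt"

        "github.com/containers/image/docker/reference"
        "github.com/docker/distribution/digest"
    )

    // classify applies the same precedence as ParseReference above: a parseable
    // digest is an image ID, otherwise the string must be a non-NameOnly reference.
    func classify(refString string) string {
        if dgst, err := digest.ParseDigest(refString); err == nil {
            return "image ID " + dgst.String()
        }
        named, err := reference.ParseNamed(refString)
        if err != nil {
            return "rejected: " + err.Error()
        }
        if reference.IsNameOnly(named) {
            return "rejected: name only, needs a tag or digest"
        }
        return "named reference " + named.String()
    }

    func main() {
        fmt.Println(classify("sha256:20bf21ed457b390829cdbeec8795a7bea1626991fda603e0d01b4e7f60427e55"))
        fmt.Println(classify("busybox:latest"))
        fmt.Println(classify("busybox"))
    }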

View File

@@ -1,5 +1,7 @@
 package daemon
+import "github.com/docker/distribution/digest"
 // Various data structures.
 // Based on github.com/docker/docker/image/tarexport/tarexport.go
@@ -24,10 +26,10 @@ type diffID string
 // Based on github.com/docker/distribution/blobs.go
 type distributionDescriptor struct {
 MediaType string `json:"mediaType,omitempty"`
 Size int64 `json:"size,omitempty"`
-Digest string `json:"digest,omitempty"`
+Digest digest.Digest `json:"digest,omitempty"`
 URLs []string `json:"urls,omitempty"`
 }
 // Based on github.com/docker/distribution/manifest/schema2/manifest.go
@@ -41,7 +43,7 @@ type schema2Manifest struct {
 // Based on github.com/docker/docker/image/image.go
 // MOST CONTENT OMITTED AS UNNECESSARY
-type image struct {
+type dockerImage struct {
 RootFS *rootFS `json:"rootfs,omitempty"`
 }

View File

@@ -7,14 +7,18 @@ import (
 "fmt"
 "io"
 "io/ioutil"
+"net"
 "net/http"
 "os"
 "path/filepath"
 "strings"
+"time"
 "github.com/Sirupsen/logrus"
 "github.com/containers/image/types"
 "github.com/docker/docker/pkg/homedir"
+"github.com/docker/go-connections/sockets"
+"github.com/docker/go-connections/tlsconfig"
 )
 const (
@@ -45,6 +49,38 @@ type dockerClient struct {
 signatureBase signatureStorageBase
 }
+// this is cloned from docker/go-connections because upstream docker has changed
+// it and make deps here fails otherwise.
+// We'll drop this once we upgrade to docker 1.13.x deps.
+func serverDefault() *tls.Config {
+return &tls.Config{
+// Avoid fallback to SSL protocols < TLS1.0
+MinVersion: tls.VersionTLS10,
+PreferServerCipherSuites: true,
+CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
+}
+}
+func newTransport() *http.Transport {
+direct := &net.Dialer{
+Timeout: 30 * time.Second,
+KeepAlive: 30 * time.Second,
+DualStack: true,
+}
+tr := &http.Transport{
+Proxy: http.ProxyFromEnvironment,
+Dial: direct.Dial,
+TLSHandshakeTimeout: 10 * time.Second,
+// TODO(dmcgowan): Call close idle connections when complete and use keep alive
+DisableKeepAlives: true,
+}
+proxyDialer, err := sockets.DialerFromEnvironment(direct)
+if err == nil {
+tr.Dial = proxyDialer.Dial
+}
+return tr
+}
 // newDockerClient returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry)
 // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
 func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool) (*dockerClient, error) {
@@ -56,7 +92,7 @@ func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool)
 if err != nil {
 return nil, err
 }
-var tr *http.Transport
+tr := newTransport()
 if ctx != nil && (ctx.DockerCertPath != "" || ctx.DockerInsecureSkipTLSVerify) {
 tlsc := &tls.Config{}
@@ -68,14 +104,12 @@ func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool)
 tlsc.Certificates = append(tlsc.Certificates, cert)
 }
 tlsc.InsecureSkipVerify = ctx.DockerInsecureSkipTLSVerify
-tr = &http.Transport{
-TLSClientConfig: tlsc,
-}
+tr.TLSClientConfig = tlsc
 }
-client := &http.Client{}
-if tr != nil {
-client.Transport = tr
-}
+if tr.TLSClientConfig == nil {
+tr.TLSClientConfig = serverDefault()
+}
+client := &http.Client{Transport: tr}
 sigBase, err := configuredSignatureStorageBase(ctx, ref, write)
 if err != nil {
@@ -210,8 +244,9 @@ func (c *dockerClient) getBearerToken(realm, service, scope string) (string, err
 if c.username != "" && c.password != "" {
 authReq.SetBasicAuth(c.username, c.password)
 }
-// insecure for now to contact the external token service
-tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}
+tr := newTransport()
+// TODO(runcom): insecure for now to contact the external token service
+tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
 client := &http.Client{Transport: tr}
 res, err := client.Do(authReq)
 if err != nil {
@@ -250,10 +285,6 @@ func getAuth(ctx *types.SystemContext, registry string) (string, string, error)
 if ctx != nil && ctx.DockerAuthConfig != nil {
 return ctx.DockerAuthConfig.Username, ctx.DockerAuthConfig.Password, nil
 }
-// TODO(runcom): get this from *cli.Context somehow
-//if username != "" && password != "" {
-//return username, password, nil
-//}
 var dockerAuth dockerConfigFile
 dockerCfgPath := filepath.Join(getDefaultConfigDir(".docker"), dockerCfgFileName)
 if _, err := os.Stat(dockerCfgPath); err == nil {

View File

@@ -2,8 +2,6 @@ package docker
 import (
 "bytes"
-"crypto/sha256"
-"encoding/hex"
 "fmt"
 "io"
 "io/ioutil"
@@ -16,13 +14,14 @@ import (
 "github.com/Sirupsen/logrus"
 "github.com/containers/image/manifest"
 "github.com/containers/image/types"
+"github.com/docker/distribution/digest"
 )
 type dockerImageDestination struct {
 ref dockerReference
 c *dockerClient
 // State
-manifestDigest string // or "" if not yet known.
+manifestDigest digest.Digest // or "" if not yet known.
 }
 // newImageDestination creates a new ImageDestination for the specified image reference.
@@ -82,8 +81,8 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
 func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
-if inputInfo.Digest != "" {
-checkURL := fmt.Sprintf(blobsURL, d.ref.ref.RemoteName(), inputInfo.Digest)
+if inputInfo.Digest.String() != "" {
+checkURL := fmt.Sprintf(blobsURL, d.ref.ref.RemoteName(), inputInfo.Digest.String())
 logrus.Debugf("Checking %s", checkURL)
 res, err := d.c.makeRequest("HEAD", checkURL, nil, nil)
@@ -127,17 +126,16 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
 return types.BlobInfo{}, fmt.Errorf("Error determining upload URL: %s", err.Error())
 }
-h := sha256.New()
+digester := digest.Canonical.New()
 sizeCounter := &sizeCounter{}
-tee := io.TeeReader(stream, io.MultiWriter(h, sizeCounter))
+tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
 res, err = d.c.makeRequestToResolvedURL("PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size)
 if err != nil {
 logrus.Debugf("Error uploading layer chunked, response %#v", *res)
 return types.BlobInfo{}, err
 }
 defer res.Body.Close()
-hash := h.Sum(nil)
-computedDigest := "sha256:" + hex.EncodeToString(hash[:])
+computedDigest := digester.Digest()
 uploadLocation, err = res.Location()
 if err != nil {
@@ -148,7 +146,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
 locationQuery := uploadLocation.Query()
 // TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
-locationQuery.Set("digest", computedDigest)
+locationQuery.Set("digest", computedDigest.String())
 uploadLocation.RawQuery = locationQuery.Encode()
 res, err = d.c.makeRequestToResolvedURL("PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1)
 if err != nil {
@@ -211,7 +209,7 @@ func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
 }
 // FIXME: This assumption that signatures are stored after the manifest rather breaks the model.
-if d.manifestDigest == "" {
+if d.manifestDigest.String() == "" {
 return fmt.Errorf("Unknown manifest digest, can't add signatures")
 }
@@ -263,7 +261,7 @@ func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte)
 return nil
 case "http", "https":
-return fmt.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location.", url.Scheme, url.String())
+return fmt.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
 default:
 return fmt.Errorf("Unsupported scheme when writing signature to %s", url.String())
 }
@@ -282,7 +280,7 @@ func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error
 return false, err
 case "http", "https":
-return false, fmt.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location.", url.Scheme, url.String())
+return false, fmt.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
 default:
 return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", url.String())
 }

View File

@@ -13,6 +13,7 @@ import (
 "github.com/Sirupsen/logrus"
 "github.com/containers/image/manifest"
 "github.com/containers/image/types"
+"github.com/docker/distribution/digest"
 "github.com/docker/distribution/registry/client"
 )
@@ -67,6 +68,8 @@ func simplifyContentType(contentType string) string {
 return mimeType
 }
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
 func (s *dockerImageSource) GetManifest() ([]byte, string, error) {
 err := s.ensureManifestIsLoaded()
 if err != nil {
@@ -96,8 +99,8 @@ func (s *dockerImageSource) fetchManifest(tagOrDigest string) ([]byte, string, e
 // GetTargetManifest returns an image's manifest given a digest.
 // This is mainly used to retrieve a single image's manifest out of a manifest list.
-func (s *dockerImageSource) GetTargetManifest(digest string) ([]byte, string, error) {
-return s.fetchManifest(digest)
+func (s *dockerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
+return s.fetchManifest(digest.String())
 }
 // ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType
@@ -128,8 +131,8 @@ func (s *dockerImageSource) ensureManifestIsLoaded() error {
 }
 // GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
-func (s *dockerImageSource) GetBlob(digest string) (io.ReadCloser, int64, error) {
-url := fmt.Sprintf(blobsURL, s.ref.ref.RemoteName(), digest)
+func (s *dockerImageSource) GetBlob(digest digest.Digest) (io.ReadCloser, int64, error) {
+url := fmt.Sprintf(blobsURL, s.ref.ref.RemoteName(), digest.String())
 logrus.Debugf("Downloading %s", url)
 res, err := s.c.makeRequest("GET", url, nil, nil)
 if err != nil {
@@ -244,7 +247,7 @@ func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
 switch get.StatusCode {
 case http.StatusOK:
 case http.StatusNotFound:
-return fmt.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry.", ref.ref)
+return fmt.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
 default:
 return fmt.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
 }

View File

@ -9,6 +9,7 @@ import (
"path/filepath" "path/filepath"
"strings" "strings"
"github.com/docker/distribution/digest"
"github.com/ghodss/yaml" "github.com/ghodss/yaml"
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
@ -188,11 +189,11 @@ func (ns registryNamespace) signatureTopLevel(write bool) string {
// signatureStorageURL returns an URL usable for accessing signature index in base with known manifestDigest, or nil if not applicable. // signatureStorageURL returns an URL usable for accessing signature index in base with known manifestDigest, or nil if not applicable.
// Returns nil iff base == nil. // Returns nil iff base == nil.
func signatureStorageURL(base signatureStorageBase, manifestDigest string, index int) *url.URL { func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL {
if base == nil { if base == nil {
return nil return nil
} }
url := *base url := *base
url.Path = fmt.Sprintf("%s@%s/signature-%d", url.Path, manifestDigest, index+1) url.Path = fmt.Sprintf("%s@%s/signature-%d", url.Path, manifestDigest.String(), index+1)
return &url return &url
} }
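
To make the layout produced by signatureStorageURL concrete, the following standalone sketch applies the same format string to a hypothetical sigstore base (the host, path, and manifest bytes are invented for illustration):

package main

import (
	"fmt"
	"net/url"

	"github.com/docker/distribution/digest"
)

func main() {
	base, err := url.Parse("https://sigstore.example.com/sigstore/busybox") // hypothetical base
	if err != nil {
		panic(err)
	}
	manifestDigest := digest.FromBytes([]byte("{}")) // stand-in manifest digest
	index := 0

	u := *base
	u.Path = fmt.Sprintf("%s@%s/signature-%d", u.Path, manifestDigest.String(), index+1)
	fmt.Println(u.String()) // .../sigstore/busybox@sha256:<hex>/signature-1
}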

View File

@ -8,6 +8,7 @@ import (
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest"
) )
type platformSpec struct { type platformSpec struct {
@ -36,7 +37,7 @@ func manifestSchema2FromManifestList(src types.ImageSource, manblob []byte) (gen
if err := json.Unmarshal(manblob, &list); err != nil { if err := json.Unmarshal(manblob, &list); err != nil {
return nil, err return nil, err
} }
var targetManifestDigest string var targetManifestDigest digest.Digest
for _, d := range list.Manifests { for _, d := range list.Manifests {
if d.Platform.Architecture == runtime.GOARCH && d.Platform.OS == runtime.GOOS { if d.Platform.Architecture == runtime.GOARCH && d.Platform.OS == runtime.GOOS {
targetManifestDigest = d.Digest targetManifestDigest = d.Digest

View File

@ -1,8 +1,6 @@
package image package image
import ( import (
"crypto/sha256"
"encoding/hex"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
@ -13,6 +11,7 @@ import (
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest"
) )
var ( var (
@ -20,7 +19,7 @@ var (
) )
type fsLayersSchema1 struct { type fsLayersSchema1 struct {
BlobSum string `json:"blobSum"` BlobSum digest.Digest `json:"blobSum"`
} }
type historySchema1 struct { type historySchema1 struct {
@ -54,16 +53,19 @@ func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) {
if err := json.Unmarshal(manifest, mschema1); err != nil { if err := json.Unmarshal(manifest, mschema1); err != nil {
return nil, err return nil, err
} }
if mschema1.SchemaVersion != 1 {
return nil, fmt.Errorf("unsupported schema version %d", mschema1.SchemaVersion)
}
if len(mschema1.FSLayers) != len(mschema1.History) {
return nil, errors.New("length of history not equal to number of layers")
}
if len(mschema1.FSLayers) == 0 {
return nil, errors.New("no FSLayers in manifest")
}
if err := fixManifestLayers(mschema1); err != nil { if err := fixManifestLayers(mschema1); err != nil {
return nil, err return nil, err
} }
// TODO(runcom): verify manifest schema 1, 2 etc
//if len(m.FSLayers) != len(m.History) {
//return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String())
//}
//if len(m.FSLayers) == 0 {
//return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
//}
return mschema1, nil return mschema1, nil
} }
@ -201,7 +203,7 @@ func fixManifestLayers(manifest *manifestSchema1) error {
} }
} }
if imgs[len(imgs)-1].Parent != "" { if imgs[len(imgs)-1].Parent != "" {
return errors.New("Invalid parent ID in the base layer of the image.") return errors.New("Invalid parent ID in the base layer of the image")
} }
// check general duplicates to error instead of a deadlock // check general duplicates to error instead of a deadlock
idmap := make(map[string]struct{}) idmap := make(map[string]struct{})
@ -220,7 +222,7 @@ func fixManifestLayers(manifest *manifestSchema1) error {
manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...) manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...)
manifest.History = append(manifest.History[:i], manifest.History[i+1:]...) manifest.History = append(manifest.History[:i], manifest.History[i+1:]...)
} else if imgs[i].Parent != imgs[i+1].ID { } else if imgs[i].Parent != imgs[i+1].ID {
return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent) return fmt.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
} }
} }
return nil return nil
@ -284,11 +286,10 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl
if err != nil { if err != nil {
return nil, err return nil, err
} }
configHash := sha256.Sum256(configJSON)
configDescriptor := descriptor{ configDescriptor := descriptor{
MediaType: "application/vnd.docker.container.image.v1+json", MediaType: "application/vnd.docker.container.image.v1+json",
Size: int64(len(configJSON)), Size: int64(len(configJSON)),
Digest: "sha256:" + hex.EncodeToString(configHash[:]), Digest: digest.FromBytes(configJSON),
} }
m2 := manifestSchema2FromComponents(configDescriptor, configJSON, layers) m2 := manifestSchema2FromComponents(configDescriptor, configJSON, layers)

View File

@ -12,6 +12,7 @@ import (
"github.com/Sirupsen/logrus" "github.com/Sirupsen/logrus"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest"
) )
// gzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) // gzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
@ -24,12 +25,12 @@ var gzippedEmptyLayer = []byte{
} }
// gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer // gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer
const gzippedEmptyLayerDigest = "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
type descriptor struct { type descriptor struct {
MediaType string `json:"mediaType"` MediaType string `json:"mediaType"`
Size int64 `json:"size"` Size int64 `json:"size"`
Digest string `json:"digest"` Digest digest.Digest `json:"digest"`
} }
type manifestSchema2 struct { type manifestSchema2 struct {
@ -91,8 +92,7 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
hash := sha256.Sum256(blob) computedDigest := digest.FromBytes(blob)
computedDigest := "sha256:" + hex.EncodeToString(hash[:])
if computedDigest != m.ConfigDescriptor.Digest { if computedDigest != m.ConfigDescriptor.Digest {
return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest) return nil, fmt.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
} }
@ -189,7 +189,7 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
parentV1ID = v1ID parentV1ID = v1ID
v1Index := len(imageConfig.History) - 1 - v2Index v1Index := len(imageConfig.History) - 1 - v2Index
var blobDigest string var blobDigest digest.Digest
if historyEntry.EmptyLayer { if historyEntry.EmptyLayer {
if !haveGzippedEmptyLayer { if !haveGzippedEmptyLayer {
logrus.Debugf("Uploading empty layer during conversion to schema 1") logrus.Debugf("Uploading empty layer during conversion to schema 1")
@ -252,12 +252,11 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination)
return memoryImageFromManifest(m1), nil return memoryImageFromManifest(m1), nil
} }
func v1IDFromBlobDigestAndComponents(blobDigest string, others ...string) (string, error) { func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) {
blobDigestComponents := strings.SplitN(blobDigest, ":", 2) if err := blobDigest.Validate(); err != nil {
if len(blobDigestComponents) != 2 { return "", err
return "", fmt.Errorf("Invalid layer digest %s: expecting algorithm:value", blobDigest)
} }
parts := append([]string{blobDigestComponents[1]}, others...) parts := append([]string{blobDigest.Hex()}, others...)
v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
return hex.EncodeToString(v1IDHash[:]), nil return hex.EncodeToString(v1IDHash[:]), nil
} }
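
The derivation in v1IDFromBlobDigestAndComponents can be replayed outside the converter; the layer contents and parent ID below are invented, but the hashing steps mirror the function above:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"

	"github.com/docker/distribution/digest"
)

func main() {
	blobDigest := digest.FromBytes([]byte("fake layer contents")) // stand-in for a real layer digest
	if err := blobDigest.Validate(); err != nil {
		panic(err)
	}
	parentV1ID := "0123456789abcdef" // hypothetical parent v1 ID
	parts := append([]string{blobDigest.Hex()}, parentV1ID)
	v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
	fmt.Println(hex.EncodeToString(v1IDHash[:]))
}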

View File

@ -1,14 +1,13 @@
package image package image
import ( import (
"errors"
"fmt"
"time" "time"
"github.com/docker/engine-api/types/strslice" "github.com/docker/engine-api/types/strslice"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
) )
type config struct { type config struct {
@ -86,14 +85,23 @@ func manifestInstanceFromBlob(src types.ImageSource, manblob []byte, mt string)
// need to happen within the ImageSource. // need to happen within the ImageSource.
case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, "application/json": case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, "application/json":
return manifestSchema1FromManifest(manblob) return manifestSchema1FromManifest(manblob)
case manifest.DockerV2Schema2MediaType: case manifest.DockerV2Schema2MediaType, imgspecv1.MediaTypeImageManifest:
// FIXME: OCI v1 is compatible with Docker Schema2, "docker_schema2.go" is good enough for reading images, but this will
// need to be modified for write support due to differing MIME types.
return manifestSchema2FromManifest(src, manblob) return manifestSchema2FromManifest(src, manblob)
case manifest.DockerV2ListMediaType: case manifest.DockerV2ListMediaType:
return manifestSchema2FromManifestList(src, manblob) return manifestSchema2FromManifestList(src, manblob)
case "":
return nil, errors.New("could not guess manifest media type")
default: default:
return nil, fmt.Errorf("unsupported manifest media type %s", mt) // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
// to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
// and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
//
// Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
// This makes no real sense, but it happens
// because requests for manifests are
// redirected to a content distribution
// network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
return manifestSchema1FromManifest(manblob)
} }
} }
@ -106,7 +114,7 @@ func inspectManifest(m genericManifest) (*types.ImageInspectInfo, error) {
layers := m.LayerInfos() layers := m.LayerInfos()
info.Layers = make([]string, len(layers)) info.Layers = make([]string, len(layers))
for i, layer := range layers { for i, layer := range layers {
info.Layers[i] = layer.Digest info.Layers[i] = layer.Digest.String()
} }
return info, nil return info, nil
} }

View File

@ -57,14 +57,6 @@ func FromUnparsedImage(unparsed *UnparsedImage) (types.Image, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
if manifestMIMEType == "" || manifestMIMEType == "text/plain" {
// Crane registries can return "text/plain".
// This makes no real sense, but it happens
// because requests for manifests are
// redirected to a content distribution
// network which is configured that way.
manifestMIMEType = manifest.GuessMIMEType(manifestBlob)
}
parsedManifest, err := manifestInstanceFromBlob(unparsed.src, manifestBlob, manifestMIMEType) parsedManifest, err := manifestInstanceFromBlob(unparsed.src, manifestBlob, manifestMIMEType)
if err != nil { if err != nil {
@ -79,7 +71,7 @@ func FromUnparsedImage(unparsed *UnparsedImage) (types.Image, error) {
}, nil }, nil
} }
// Manifest overrides the UnparsedImage.Manifest to use the fields which we have already fetched, after guessing and overrides. // Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
func (i *sourcedImage) Manifest() ([]byte, string, error) { func (i *sourcedImage) Manifest() ([]byte, string, error) {
return i.manifestBlob, i.manifestMIMEType, nil return i.manifestBlob, i.manifestMIMEType, nil
} }

View File

@ -53,7 +53,7 @@ func (i *UnparsedImage) Manifest() ([]byte, string, error) {
ref := i.Reference().DockerReference() ref := i.Reference().DockerReference()
if ref != nil { if ref != nil {
if canonical, ok := ref.(reference.Canonical); ok { if canonical, ok := ref.(reference.Canonical); ok {
digest := canonical.Digest().String() digest := canonical.Digest()
matches, err := manifest.MatchesDigest(m, digest) matches, err := manifest.MatchesDigest(m, digest)
if err != nil { if err != nil {
return nil, "", fmt.Errorf("Error computing manifest digest: %v", err) return nil, "", fmt.Errorf("Error computing manifest digest: %v", err)

View File

@ -1,10 +1,9 @@
package manifest package manifest
import ( import (
"crypto/sha256"
"encoding/hex"
"encoding/json" "encoding/json"
"github.com/docker/distribution/digest"
"github.com/docker/libtrust" "github.com/docker/libtrust"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
) )
@ -70,7 +69,7 @@ func GuessMIMEType(manifest []byte) string {
} }
// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures. // Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
func Digest(manifest []byte) (string, error) { func Digest(manifest []byte) (digest.Digest, error) {
if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType { if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
sig, err := libtrust.ParsePrettySignature(manifest, "signatures") sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
if err != nil { if err != nil {
@ -84,15 +83,14 @@ func Digest(manifest []byte) (string, error) {
} }
} }
hash := sha256.Sum256(manifest) return digest.FromBytes(manifest), nil
return "sha256:" + hex.EncodeToString(hash[:]), nil
} }
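
With Digest now returning a typed digest.Digest and MatchesDigest (below) accepting one, the round trip for callers looks like this sketch (the manifest bytes are a stand-in, not a real manifest):

package main

import (
	"fmt"

	"github.com/containers/image/manifest"
)

func main() {
	blob := []byte(`{"schemaVersion": 2}`) // stand-in manifest body
	d, err := manifest.Digest(blob)        // digest.Digest, not a bare string
	if err != nil {
		panic(err)
	}
	ok, err := manifest.MatchesDigest(blob, d)
	if err != nil {
		panic(err)
	}
	fmt.Println(d.String(), ok) // sha256:<hex> true
}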
// MatchesDigest returns true iff the manifest matches expectedDigest. // MatchesDigest returns true iff the manifest matches expectedDigest.
// Error may be set if this returns false. // Error may be set if this returns false.
// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified, // Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob. // or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
func MatchesDigest(manifest []byte, expectedDigest string) (bool, error) { func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
// This should eventually support various digest types. // This should eventually support various digest types.
actualDigest, err := Digest(manifest) actualDigest, err := Digest(manifest)
if err != nil { if err != nil {

View File

@ -1,8 +1,6 @@
package layout package layout
import ( import (
"crypto/sha256"
"encoding/hex"
"encoding/json" "encoding/json"
"errors" "errors"
"fmt" "fmt"
@ -13,6 +11,7 @@ import (
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
) )
@ -75,14 +74,14 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo
} }
}() }()
h := sha256.New() digester := digest.Canonical.New()
tee := io.TeeReader(stream, h) tee := io.TeeReader(stream, digester.Hash())
size, err := io.Copy(blobFile, tee) size, err := io.Copy(blobFile, tee)
if err != nil { if err != nil {
return types.BlobInfo{}, err return types.BlobInfo{}, err
} }
computedDigest := "sha256:" + hex.EncodeToString(h.Sum(nil)) computedDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size { if inputInfo.Size != -1 && size != inputInfo.Size {
return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size) return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
} }
@ -153,7 +152,7 @@ func (d *ociImageDestination) PutManifest(m []byte) error {
return err return err
} }
desc := imgspecv1.Descriptor{} desc := imgspecv1.Descriptor{}
desc.Digest = digest desc.Digest = digest.String()
// TODO(runcom): be aware and add support for OCI manifest list // TODO(runcom): be aware and add support for OCI manifest list
desc.MediaType = mt desc.MediaType = mt
desc.Size = int64(len(ociMan)) desc.Size = int64(len(ociMan))

View File

@ -0,0 +1,94 @@
package layout
import (
"encoding/json"
"io"
"io/ioutil"
"os"
"github.com/containers/image/manifest"
"github.com/containers/image/types"
"github.com/docker/distribution/digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)
type ociImageSource struct {
ref ociReference
}
// newImageSource returns an ImageSource for reading from an existing directory.
func newImageSource(ref ociReference) types.ImageSource {
return &ociImageSource{ref: ref}
}
// Reference returns the reference used to set up this source.
func (s *ociImageSource) Reference() types.ImageReference {
return s.ref
}
// Close removes resources associated with an initialized ImageSource, if any.
func (s *ociImageSource) Close() {
}
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
func (s *ociImageSource) GetManifest() ([]byte, string, error) {
descriptorPath := s.ref.descriptorPath(s.ref.tag)
data, err := ioutil.ReadFile(descriptorPath)
if err != nil {
return nil, "", err
}
desc := imgspecv1.Descriptor{}
err = json.Unmarshal(data, &desc)
if err != nil {
return nil, "", err
}
manifestPath, err := s.ref.blobPath(digest.Digest(desc.Digest))
if err != nil {
return nil, "", err
}
m, err := ioutil.ReadFile(manifestPath)
if err != nil {
return nil, "", err
}
return m, manifest.GuessMIMEType(m), nil
}
func (s *ociImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
manifestPath, err := s.ref.blobPath(digest)
if err != nil {
return nil, "", err
}
m, err := ioutil.ReadFile(manifestPath)
if err != nil {
return nil, "", err
}
return m, manifest.GuessMIMEType(m), nil
}
// GetBlob returns a stream for the specified blob, and the blob's size.
func (s *ociImageSource) GetBlob(digest digest.Digest) (io.ReadCloser, int64, error) {
path, err := s.ref.blobPath(digest)
if err != nil {
return nil, 0, err
}
r, err := os.Open(path)
if err != nil {
return nil, 0, err
}
fi, err := r.Stat()
if err != nil {
return nil, 0, err
}
return r, fi.Size(), nil
}
func (s *ociImageSource) GetSignatures() ([][]byte, error) {
return [][]byte{}, nil
}

View File

@ -9,7 +9,9 @@ import (
"github.com/containers/image/directory/explicitfilepath" "github.com/containers/image/directory/explicitfilepath"
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
"github.com/containers/image/image"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest"
) )
// Transport is an ImageTransport for OCI directories. // Transport is an ImageTransport for OCI directories.
@ -169,7 +171,8 @@ func (ref ociReference) PolicyConfigurationNamespaces() []string {
// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
func (ref ociReference) NewImage(ctx *types.SystemContext) (types.Image, error) { func (ref ociReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
return nil, errors.New("Full Image support not implemented for oci: image names") src := newImageSource(ref)
return image.FromSource(src)
} }
// NewImageSource returns a types.ImageSource for this reference, // NewImageSource returns a types.ImageSource for this reference,
@ -177,7 +180,7 @@ func (ref ociReference) NewImage(ctx *types.SystemContext) (types.Image, error)
// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes. // nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
// The caller must call .Close() on the returned ImageSource. // The caller must call .Close() on the returned ImageSource.
func (ref ociReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) { func (ref ociReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
return nil, errors.New("Reading images not implemented for oci: image names") return newImageSource(ref), nil
} }
// NewImageDestination returns a types.ImageDestination for this reference. // NewImageDestination returns a types.ImageDestination for this reference.
@ -197,12 +200,11 @@ func (ref ociReference) ociLayoutPath() string {
} }
// blobPath returns a path for a blob within a directory using OCI image-layout conventions. // blobPath returns a path for a blob within a directory using OCI image-layout conventions.
func (ref ociReference) blobPath(digest string) (string, error) { func (ref ociReference) blobPath(digest digest.Digest) (string, error) {
pts := strings.SplitN(digest, ":", 2) if err := digest.Validate(); err != nil {
if len(pts) != 2 { return "", fmt.Errorf("unexpected digest reference %s: %v", digest, err)
return "", fmt.Errorf("unexpected digest reference %s", digest)
} }
return filepath.Join(ref.dir, "blobs", pts[0], pts[1]), nil return filepath.Join(ref.dir, "blobs", digest.Algorithm().String(), digest.Hex()), nil
} }
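
The resulting on-disk layout is easy to see with the digest accessors used above; the layout root and blob contents here are hypothetical:

package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/distribution/digest"
)

func main() {
	dir := "/tmp/oci-layout-example" // hypothetical layout root
	d := digest.FromBytes([]byte("blob contents"))
	if err := d.Validate(); err != nil {
		panic(err)
	}
	// Same convention as blobPath above: blobs/<algorithm>/<hex>.
	fmt.Println(filepath.Join(dir, "blobs", d.Algorithm().String(), d.Hex()))
}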
// descriptorPath returns a path for the manifest within a directory using OCI conventions. // descriptorPath returns a path for the manifest within a directory using OCI conventions.

View File

@ -17,6 +17,7 @@ import (
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/containers/image/version" "github.com/containers/image/version"
"github.com/docker/distribution/digest"
) )
// openshiftClient is configuration for dealing with a single image stream, for reading or writing. // openshiftClient is configuration for dealing with a single image stream, for reading or writing.
@ -196,13 +197,15 @@ func (s *openshiftImageSource) Close() {
} }
} }
func (s *openshiftImageSource) GetTargetManifest(digest string) ([]byte, string, error) { func (s *openshiftImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
if err := s.ensureImageIsResolved(); err != nil { if err := s.ensureImageIsResolved(); err != nil {
return nil, "", err return nil, "", err
} }
return s.docker.GetTargetManifest(digest) return s.docker.GetTargetManifest(digest)
} }
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
func (s *openshiftImageSource) GetManifest() ([]byte, string, error) { func (s *openshiftImageSource) GetManifest() ([]byte, string, error) {
if err := s.ensureImageIsResolved(); err != nil { if err := s.ensureImageIsResolved(); err != nil {
return nil, "", err return nil, "", err
@ -211,7 +214,7 @@ func (s *openshiftImageSource) GetManifest() ([]byte, string, error) {
} }
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown). // GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
func (s *openshiftImageSource) GetBlob(digest string) (io.ReadCloser, int64, error) { func (s *openshiftImageSource) GetBlob(digest digest.Digest) (io.ReadCloser, int64, error) {
if err := s.ensureImageIsResolved(); err != nil { if err := s.ensureImageIsResolved(); err != nil {
return nil, 0, err return nil, 0, err
} }
@ -362,7 +365,7 @@ func (d *openshiftImageDestination) PutManifest(m []byte) error {
if err != nil { if err != nil {
return err return err
} }
d.imageStreamImageName = manifestDigest d.imageStreamImageName = manifestDigest.String()
return d.docker.PutManifest(m) return d.docker.PutManifest(m)
} }

View File

@ -6,6 +6,7 @@ import (
"fmt" "fmt"
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/docker/distribution/digest"
) )
// SignDockerManifest returns a signature for manifest as the specified dockerReference, // SignDockerManifest returns a signature for manifest as the specified dockerReference,
@ -42,7 +43,7 @@ func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byt
} }
return nil return nil
}, },
validateSignedDockerManifestDigest: func(signedDockerManifestDigest string) error { validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error {
matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest) matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest)
if err != nil { if err != nil {
return err return err

View File

@ -386,7 +386,7 @@ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type)) return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
} }
if signedIdentity == nil { if signedIdentity == nil {
tmp.SignedIdentity = NewPRMMatchExact() tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
} else { } else {
si, err := newPolicyReferenceMatchFromJSON(signedIdentity) si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
if err != nil { if err != nil {
@ -501,7 +501,7 @@ func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
return nil return nil
} }
// newPolicyRequirementFromJSON parses JSON data into a PolicyReferenceMatch implementation. // newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation.
func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) { func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) {
var typeField prmCommon var typeField prmCommon
if err := json.Unmarshal(data, &typeField); err != nil { if err := json.Unmarshal(data, &typeField); err != nil {
@ -511,6 +511,8 @@ func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error)
switch typeField.Type { switch typeField.Type {
case prmTypeMatchExact: case prmTypeMatchExact:
res = &prmMatchExact{} res = &prmMatchExact{}
case prmTypeMatchRepoDigestOrExact:
res = &prmMatchRepoDigestOrExact{}
case prmTypeMatchRepository: case prmTypeMatchRepository:
res = &prmMatchRepository{} res = &prmMatchRepository{}
case prmTypeExactReference: case prmTypeExactReference:
@ -561,6 +563,41 @@ func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
return nil return nil
} }
// newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type.
func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact {
return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}}
}
// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch.
func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch {
return newPRMMatchRepoDigestOrExact()
}
// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler.
var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
// UnmarshalJSON implements the json.Unmarshaler interface.
func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
*prm = prmMatchRepoDigestOrExact{}
var tmp prmMatchRepoDigestOrExact
if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
switch key {
case "type":
return &tmp.Type
default:
return nil
}
}); err != nil {
return err
}
if tmp.Type != prmTypeMatchRepoDigestOrExact {
return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
}
*prm = *newPRMMatchRepoDigestOrExact()
return nil
}
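
Both matchers are reachable through exported constructors, and the signedBy default (when policy.json omits signedIdentity) now comes from the new one; a minimal sketch using only the exported API (the variable names are illustrative):

package main

import (
	"fmt"

	"github.com/containers/image/signature"
)

func main() {
	// New default for "signedBy" requirements that do not specify signedIdentity.
	var defaultIdentity signature.PolicyReferenceMatch = signature.NewPRMMatchRepoDigestOrExact()
	// The previous default remains available for policies that want strict matching.
	strict := signature.NewPRMMatchExact()
	fmt.Printf("%T %T\n", defaultIdentity, strict)
}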
// newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type. // newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type.
func newPRMMatchRepository() *prmMatchRepository { func newPRMMatchRepository() *prmMatchRepository {
return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}} return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}}

View File

@ -11,6 +11,7 @@ import (
"github.com/containers/image/manifest" "github.com/containers/image/manifest"
"github.com/containers/image/types" "github.com/containers/image/types"
"github.com/docker/distribution/digest"
) )
func (pr *prSignedBy) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) { func (pr *prSignedBy) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
@ -75,7 +76,7 @@ func (pr *prSignedBy) isSignatureAuthorAccepted(image types.UnparsedImage, sig [
} }
return nil return nil
}, },
validateSignedDockerManifestDigest: func(digest string) error { validateSignedDockerManifestDigest: func(digest digest.Digest) error {
m, _, err := image.Manifest() m, _, err := image.Manifest()
if err != nil { if err != nil {
return err return err

View File

@ -36,6 +36,29 @@ func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, sign
return signature.String() == intended.String() return signature.String() == intended.String()
} }
func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
if err != nil {
return false
}
// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
if reference.IsNameOnly(signature) {
return false
}
switch intended.(type) {
case reference.NamedTagged: // Includes the case when intended has both a tag and a digest.
return signature.String() == intended.String()
case reference.Canonical:
// We don't actually compare the manifest digest against the signature here; that happens in prSignedBy and in UnparsedImage.Manifest.
// Because UnparsedImage.Manifest verifies intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest,
// we know that the signature digest matches intended.Digest() (though the two may use different digest algorithms).
return signature.Name() == intended.Name()
default: // !reference.IsNameOnly(intended)
return false
}
}
func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool { func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference) intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
if err != nil { if err != nil {

View File

@ -116,10 +116,11 @@ type prmCommon struct {
type prmTypeIdentifier string type prmTypeIdentifier string
const ( const (
prmTypeMatchExact prmTypeIdentifier = "matchExact" prmTypeMatchExact prmTypeIdentifier = "matchExact"
prmTypeMatchRepository prmTypeIdentifier = "matchRepository" prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact"
prmTypeExactReference prmTypeIdentifier = "exactReference" prmTypeMatchRepository prmTypeIdentifier = "matchRepository"
prmTypeExactRepository prmTypeIdentifier = "exactRepository" prmTypeExactReference prmTypeIdentifier = "exactReference"
prmTypeExactRepository prmTypeIdentifier = "exactRepository"
) )
// prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly. // prmMatchExact is a PolicyReferenceMatch with type = prmMatchExact: the two references must match exactly.
@ -127,6 +128,12 @@ type prmMatchExact struct {
prmCommon prmCommon
} }
// prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmMatchRepoDigestOrExact: the two references must match exactly,
// except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest.
type prmMatchRepoDigestOrExact struct {
prmCommon
}
// prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, may differ in the tag. // prmMatchRepository is a PolicyReferenceMatch with type = prmMatchRepository: the two references must use the same repository, may differ in the tag.
type prmMatchRepository struct { type prmMatchRepository struct {
prmCommon prmCommon

View File

@ -9,6 +9,7 @@ import (
"time" "time"
"github.com/containers/image/version" "github.com/containers/image/version"
"github.com/docker/distribution/digest"
) )
const ( const (
@ -26,7 +27,7 @@ func (err InvalidSignatureError) Error() string {
// Signature is a parsed content of a signature. // Signature is a parsed content of a signature.
type Signature struct { type Signature struct {
DockerManifestDigest string // FIXME: more precise type? DockerManifestDigest digest.Digest
DockerReference string // FIXME: more precise type? DockerReference string // FIXME: more precise type?
} }
@ -50,7 +51,7 @@ func (s privateSignature) marshalJSONWithVariables(timestamp int64, creatorID st
} }
critical := map[string]interface{}{ critical := map[string]interface{}{
"type": signatureType, "type": signatureType,
"image": map[string]string{"docker-manifest-digest": s.DockerManifestDigest}, "image": map[string]string{"docker-manifest-digest": s.DockerManifestDigest.String()},
"identity": map[string]string{"docker-reference": s.DockerReference}, "identity": map[string]string{"docker-reference": s.DockerReference},
} }
optional := map[string]interface{}{ optional := map[string]interface{}{
@ -122,11 +123,11 @@ func (s *privateSignature) strictUnmarshalJSON(data []byte) error {
if err := validateExactMapKeys(image, "docker-manifest-digest"); err != nil { if err := validateExactMapKeys(image, "docker-manifest-digest"); err != nil {
return err return err
} }
digest, err := stringField(image, "docker-manifest-digest") digestString, err := stringField(image, "docker-manifest-digest")
if err != nil { if err != nil {
return err return err
} }
s.DockerManifestDigest = digest s.DockerManifestDigest = digest.Digest(digestString)
identity, err := mapField(c, "identity") identity, err := mapField(c, "identity")
if err != nil { if err != nil {
@ -162,7 +163,7 @@ func (s privateSignature) sign(mech SigningMechanism, keyIdentity string) ([]byt
type signatureAcceptanceRules struct { type signatureAcceptanceRules struct {
validateKeyIdentity func(string) error validateKeyIdentity func(string) error
validateSignedDockerReference func(string) error validateSignedDockerReference func(string) error
validateSignedDockerManifestDigest func(string) error validateSignedDockerManifestDigest func(digest.Digest) error
} }
// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components // verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components

View File

@ -5,6 +5,7 @@ import (
"time" "time"
"github.com/containers/image/docker/reference" "github.com/containers/image/docker/reference"
"github.com/docker/distribution/digest"
) )
// ImageTransport is a top-level namespace for ways to to store/load an image. // ImageTransport is a top-level namespace for ways to to store/load an image.
@ -91,8 +92,8 @@ type ImageReference interface {
// BlobInfo collects known information about a blob (layer/config). // BlobInfo collects known information about a blob (layer/config).
// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that. // In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that.
type BlobInfo struct { type BlobInfo struct {
Digest string // "" if unknown. Digest digest.Digest // "" if unknown.
Size int64 // -1 if unknown Size int64 // -1 if unknown
} }
// ImageSource is a service, possibly remote (= slow), to download components of a single image. // ImageSource is a service, possibly remote (= slow), to download components of a single image.
@ -108,14 +109,14 @@ type ImageSource interface {
Reference() ImageReference Reference() ImageReference
// Close removes resources associated with an initialized ImageSource, if any. // Close removes resources associated with an initialized ImageSource, if any.
Close() Close()
// GetManifest returns the image's manifest along with its MIME type. The empty string is returned if the MIME type is unknown. // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service. // It may use a remote (= slow) service.
GetManifest() ([]byte, string, error) GetManifest() ([]byte, string, error)
// GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest // GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest
// out of a manifest list. // out of a manifest list.
GetTargetManifest(digest string) ([]byte, string, error) GetTargetManifest(digest digest.Digest) ([]byte, string, error)
// GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown). // GetBlob returns a stream for the specified blob, and the blobs size (or -1 if unknown).
GetBlob(digest string) (io.ReadCloser, int64, error) GetBlob(digest digest.Digest) (io.ReadCloser, int64, error)
// GetSignatures returns the image's signatures. It may use a remote (= slow) service. // GetSignatures returns the image's signatures. It may use a remote (= slow) service.
GetSignatures() ([][]byte, error) GetSignatures() ([][]byte, error)
} }
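
Since ImageSource now takes typed digests end to end, a call site might look like the hypothetical helper below (fetchBlob and its error handling are illustrative, not part of the library):

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/containers/image/types"
	"github.com/docker/distribution/digest"
)

// fetchBlob parses a raw string into a digest.Digest once and then uses the
// typed ImageSource methods. It is a hypothetical helper, not library code.
func fetchBlob(src types.ImageSource, rawDigest string) ([]byte, error) {
	d, err := digest.ParseDigest(rawDigest)
	if err != nil {
		return nil, err
	}
	rc, _, err := src.GetBlob(d)
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}

func main() {
	// Round-trip a digest through its string form, as a caller of fetchBlob would.
	d := digest.FromBytes([]byte("example"))
	reparsed, err := digest.ParseDigest(d.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(reparsed == d) // true; fetchBlob would pass this to src.GetBlob
}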

View File

@ -1,6 +1,6 @@
ISC License ISC License
Copyright (c) 2012-2013 Dave Collins <dave@davec.name> Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and distribute this software for any Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above purpose with or without fee is hereby granted, provided that the above

View File

@ -1,4 +1,4 @@
// Copyright (c) 2015 Dave Collins <dave@davec.name> // Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
// //
// Permission to use, copy, modify, and distribute this software for any // Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above // purpose with or without fee is hereby granted, provided that the above

View File

@ -1,4 +1,4 @@
// Copyright (c) 2015 Dave Collins <dave@davec.name> // Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
// //
// Permission to use, copy, modify, and distribute this software for any // Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above // purpose with or without fee is hereby granted, provided that the above

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013 Dave Collins <dave@davec.name> * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
* *
* Permission to use, copy, modify, and distribute this software for any * Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above * purpose with or without fee is hereby granted, provided that the above

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013 Dave Collins <dave@davec.name> * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
* *
* Permission to use, copy, modify, and distribute this software for any * Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above * purpose with or without fee is hereby granted, provided that the above
@ -67,6 +67,15 @@ type ConfigState struct {
// Google App Engine or with the "safe" build tag specified. // Google App Engine or with the "safe" build tag specified.
DisablePointerMethods bool DisablePointerMethods bool
// DisablePointerAddresses specifies whether to disable the printing of
// pointer addresses. This is useful when diffing data structures in tests.
DisablePointerAddresses bool
// DisableCapacities specifies whether to disable the printing of capacities
// for arrays, slices, maps and channels. This is useful when diffing
// data structures in tests.
DisableCapacities bool
// ContinueOnMethod specifies whether or not recursion should continue once // ContinueOnMethod specifies whether or not recursion should continue once
// a custom error or Stringer interface is invoked. The default, false, // a custom error or Stringer interface is invoked. The default, false,
// means it will print the results of invoking the custom error or Stringer // means it will print the results of invoking the custom error or Stringer

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013 Dave Collins <dave@davec.name> * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
* *
* Permission to use, copy, modify, and distribute this software for any * Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above * purpose with or without fee is hereby granted, provided that the above
@ -91,6 +91,15 @@ The following configuration options are available:
which only accept pointer receivers from non-pointer variables. which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default. Pointer method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of
capacities for arrays, slices, maps and channels. This is useful when
diffing data structures in tests.
* ContinueOnMethod * ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default. methods. Recursion after method invocation is disabled by default.

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013 Dave Collins <dave@davec.name> * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
* *
* Permission to use, copy, modify, and distribute this software for any * Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above * purpose with or without fee is hereby granted, provided that the above
@ -129,7 +129,7 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
d.w.Write(closeParenBytes) d.w.Write(closeParenBytes)
// Display pointer information. // Display pointer information.
if len(pointerChain) > 0 { if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
d.w.Write(openParenBytes) d.w.Write(openParenBytes)
for i, addr := range pointerChain { for i, addr := range pointerChain {
if i > 0 { if i > 0 {
@ -282,13 +282,13 @@ func (d *dumpState) dump(v reflect.Value) {
case reflect.Map, reflect.String: case reflect.Map, reflect.String:
valueLen = v.Len() valueLen = v.Len()
} }
if valueLen != 0 || valueCap != 0 { if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
d.w.Write(openParenBytes) d.w.Write(openParenBytes)
if valueLen != 0 { if valueLen != 0 {
d.w.Write(lenEqualsBytes) d.w.Write(lenEqualsBytes)
printInt(d.w, int64(valueLen), 10) printInt(d.w, int64(valueLen), 10)
} }
if valueCap != 0 { if !d.cs.DisableCapacities && valueCap != 0 {
if valueLen != 0 { if valueLen != 0 {
d.w.Write(spaceBytes) d.w.Write(spaceBytes)
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013 Dave Collins <dave@davec.name> * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
* *
* Permission to use, copy, modify, and distribute this software for any * Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above * purpose with or without fee is hereby granted, provided that the above

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2013 Dave Collins <dave@davec.name> * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
* *
* Permission to use, copy, modify, and distribute this software for any * Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above * purpose with or without fee is hereby granted, provided that the above

View File

@ -156,7 +156,7 @@ func (td *tempDir) newPath() string {
} }
} }
result := filepath.Join(td.path, strconv.Itoa(td.counter)) result := filepath.Join(td.path, strconv.Itoa(td.counter))
td.counter += 1 td.counter++
return result return result
} }
@ -274,7 +274,7 @@ func (c *C) logString(issue string) {
func (c *C) logCaller(skip int) { func (c *C) logCaller(skip int) {
// This is a bit heavier than it ought to be. // This is a bit heavier than it ought to be.
skip += 1 // Our own frame. skip++ // Our own frame.
pc, callerFile, callerLine, ok := runtime.Caller(skip) pc, callerFile, callerLine, ok := runtime.Caller(skip)
if !ok { if !ok {
return return
@ -284,7 +284,7 @@ func (c *C) logCaller(skip int) {
testFunc := runtime.FuncForPC(c.method.PC()) testFunc := runtime.FuncForPC(c.method.PC())
if runtime.FuncForPC(pc) != testFunc { if runtime.FuncForPC(pc) != testFunc {
for { for {
skip += 1 skip++
if pc, file, line, ok := runtime.Caller(skip); ok { if pc, file, line, ok := runtime.Caller(skip); ok {
// Note that the test line may be different on // Note that the test line may be different on
// distinct calls for the same test. Showing // distinct calls for the same test. Showing
@ -460,10 +460,10 @@ func (tracker *resultTracker) _loopRoutine() {
// Calls still running. Can't stop. // Calls still running. Can't stop.
select { select {
// XXX Reindent this (not now to make diff clear) // XXX Reindent this (not now to make diff clear)
case c = <-tracker._expectChan: case <-tracker._expectChan:
tracker._waiting += 1 tracker._waiting++
case c = <-tracker._doneChan: case c = <-tracker._doneChan:
tracker._waiting -= 1 tracker._waiting--
switch c.status() { switch c.status() {
case succeededSt: case succeededSt:
if c.kind == testKd { if c.kind == testKd {
@ -498,9 +498,9 @@ func (tracker *resultTracker) _loopRoutine() {
select { select {
case tracker._stopChan <- true: case tracker._stopChan <- true:
return return
case c = <-tracker._expectChan: case <-tracker._expectChan:
tracker._waiting += 1 tracker._waiting++
case c = <-tracker._doneChan: case <-tracker._doneChan:
panic("Tracker got an unexpected done call.") panic("Tracker got an unexpected done call.")
} }
} }
@ -568,13 +568,13 @@ func newSuiteRunner(suite interface{}, runConf *RunConf) *suiteRunner {
var filterRegexp *regexp.Regexp var filterRegexp *regexp.Regexp
if conf.Filter != "" { if conf.Filter != "" {
if regexp, err := regexp.Compile(conf.Filter); err != nil { regexp, err := regexp.Compile(conf.Filter)
if err != nil {
msg := "Bad filter expression: " + err.Error() msg := "Bad filter expression: " + err.Error()
runner.tracker.result.RunError = errors.New(msg) runner.tracker.result.RunError = errors.New(msg)
return runner return runner
} else {
filterRegexp = regexp
} }
filterRegexp = regexp
} }
for i := 0; i != suiteNumMethods; i++ { for i := 0; i != suiteNumMethods; i++ {

View File

@ -212,7 +212,7 @@ type hasLenChecker struct {
// The HasLen checker verifies that the obtained value has the // The HasLen checker verifies that the obtained value has the
// provided length. In many cases this is superior to using Equals // provided length. In many cases this is superior to using Equals
// in conjuction with the len function because in case the check // in conjunction with the len function because in case the check
// fails the value itself will be printed, instead of its length, // fails the value itself will be printed, instead of its length,
// providing more details for figuring the problem. // providing more details for figuring the problem.
// //