Mirror of https://github.com/containers/skopeo.git (synced 2025-06-26 14:52:36 +00:00)
Merge pull request #210 from mtrmac/api-changes

Vendor after merging mtrmac/image:api-changes and update API use

Commit 9c1cb79754
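In brief, the vendored containers/image API change is: image sources no longer expose BlobDigests(); callers ask for ConfigInfo() and LayerInfos() (each returning types.BlobInfo with Digest and Size), and ImageDestination.PutBlob now takes and returns a types.BlobInfo instead of a bare digest/size pair. A minimal caller-side sketch of the new flow follows; the helper names copyAllBlobs and copyOne are illustrative only and not part of this PR, and the package/import wrapper is just to make the sketch self-contained.

package example // illustration only, not part of the PR

import "github.com/containers/image/types"

// copyAllBlobs shows the new call pattern: config blob first, then de-duplicated layers.
func copyAllBlobs(src types.Image, rawSource types.ImageSource, dest types.ImageDestination) error {
	// Config blob, if the manifest has a separate one (schema2); schema1 returns Digest == "".
	configInfo, err := src.ConfigInfo()
	if err != nil {
		return err
	}
	if configInfo.Digest != "" {
		if err := copyOne(dest, rawSource, configInfo.Digest); err != nil {
			return err
		}
	}
	// LayerInfos may contain duplicates, so copy each digest only once.
	layers, err := src.LayerInfos()
	if err != nil {
		return err
	}
	seen := map[string]struct{}{}
	for _, info := range layers {
		if _, ok := seen[info.Digest]; ok {
			continue
		}
		seen[info.Digest] = struct{}{}
		if err := copyOne(dest, rawSource, info.Digest); err != nil {
			return err
		}
	}
	return nil
}

// copyOne streams a single blob; PutBlob now takes and returns types.BlobInfo.
func copyOne(dest types.ImageDestination, src types.ImageSource, digest string) error {
	stream, size, err := src.GetBlob(digest)
	if err != nil {
		return err
	}
	defer stream.Close()
	_, err = dest.PutBlob(stream, types.BlobInfo{Digest: digest, Size: size})
	return err
}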
@@ -7,6 +7,7 @@ import (
 	"github.com/containers/image/directory"
 	"github.com/containers/image/image"
 	"github.com/containers/image/manifest"
+	"github.com/containers/image/types"
 	"github.com/urfave/cli"
 )

@@ -30,12 +31,26 @@ var layersCmd = cli.Command{

 		blobDigests := c.Args().Tail()
 		if len(blobDigests) == 0 {
-			b, err := src.BlobDigests()
+			layers, err := src.LayerInfos()
 			if err != nil {
 				return err
 			}
-			blobDigests = b
-		}
+			seenLayers := map[string]struct{}{}
+			for _, info := range layers {
+				if _, ok := seenLayers[info.Digest]; !ok {
+					blobDigests = append(blobDigests, info.Digest)
+					seenLayers[info.Digest] = struct{}{}
+				}
+			}
+			configInfo, err := src.ConfigInfo()
+			if err != nil {
+				return err
+			}
+			if configInfo.Digest != "" {
+				blobDigests = append(blobDigests, configInfo.Digest)
+			}
+		}

 		tmpDir, err := ioutil.TempDir(".", "layers-")
 		if err != nil {
 			return err
@@ -58,7 +73,7 @@ var layersCmd = cli.Command{
 		if err != nil {
 			return err
 		}
-		if _, _, err := dest.PutBlob(r, digest, blobSize); err != nil {
+		if _, err := dest.PutBlob(r, types.BlobInfo{Digest: digest, Size: blobSize}); err != nil {
 			r.Close()
 			return err
 		}
vendor/github.com/containers/image/copy/copy.go (generated, vendored; 58 lines changed)

@@ -121,31 +121,26 @@ func Image(ctx *types.SystemContext, policyContext *signature.PolicyContext, des
 		}
 	}

-	blobDigests, err := src.BlobDigests()
+	configInfo, err := src.ConfigInfo()
 	if err != nil {
 		return fmt.Errorf("Error parsing manifest: %v", err)
 	}
-	for _, digest := range blobDigests {
-		stream, blobSize, err := rawSource.GetBlob(digest)
-		if err != nil {
-			return fmt.Errorf("Error reading blob %s: %v", digest, err)
-		}
-		defer stream.Close()
-		// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
-		// use a separate validation failure indicator.
-		// Note that we don't use a stronger "validationSucceeded" indicator, because
-		// dest.PutBlob may detect that the layer already exists, in which case we don't
-		// read stream to the end, and validation does not happen.
-		digestingReader, err := newDigestingReader(stream, digest)
-		if err != nil {
-			return fmt.Errorf("Error preparing to verify blob %s: %v", digest, err)
-		}
-		if _, _, err := dest.PutBlob(digestingReader, digest, blobSize); err != nil {
-			return fmt.Errorf("Error writing blob: %v", err)
-		}
-		if digestingReader.validationFailed { // Coverage: This should never happen.
-			return fmt.Errorf("Internal error uploading blob %s, digest verification failed but was ignored", digest)
+	if configInfo.Digest != "" {
+		if err := copyBlob(dest, rawSource, configInfo.Digest); err != nil {
+			return err
+		}
+	}
+	layerInfos, err := src.LayerInfos()
+	if err != nil {
+		return fmt.Errorf("Error parsing manifest: %v", err)
+	}
+	copiedLayers := map[string]struct{}{}
+	for _, info := range layerInfos {
+		if _, ok := copiedLayers[info.Digest]; !ok {
+			if err := copyBlob(dest, rawSource, info.Digest); err != nil {
+				return err
+			}
+			copiedLayers[info.Digest] = struct{}{}
 		}
 	}

@@ -180,3 +175,28 @@ func Image(ctx *types.SystemContext, policyContext *signature.PolicyContext, des

 	return nil
 }

+func copyBlob(dest types.ImageDestination, src types.ImageSource, digest string) error {
+	stream, blobSize, err := src.GetBlob(digest)
+	if err != nil {
+		return fmt.Errorf("Error reading blob %s: %v", digest, err)
+	}
+	defer stream.Close()
+
+	// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
+	// use a separate validation failure indicator.
+	// Note that we don't use a stronger "validationSucceeded" indicator, because
+	// dest.PutBlob may detect that the layer already exists, in which case we don't
+	// read stream to the end, and validation does not happen.
+	digestingReader, err := newDigestingReader(stream, digest)
+	if err != nil {
+		return fmt.Errorf("Error preparing to verify blob %s: %v", digest, err)
+	}
+	if _, err := dest.PutBlob(digestingReader, types.BlobInfo{Digest: digest, Size: blobSize}); err != nil {
+		return fmt.Errorf("Error writing blob: %v", err)
+	}
+	if digestingReader.validationFailed { // Coverage: This should never happen.
+		return fmt.Errorf("Internal error uploading blob %s, digest verification failed but was ignored", digest)
+	}
+	return nil
+}
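newDigestingReader and its validationFailed flag, which copyBlob relies on above, live elsewhere in copy.go and are not shown in this diff. Conceptually the wrapper behaves roughly like the following sketch; the field and function names match the call sites above, but the implementation details here are assumed, not copied from the vendored file.

package copy // illustration only; the real implementation is elsewhere in copy.go

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"strings"
)

// digestingReader hashes everything read through it and, at EOF, compares the
// result against the expected digest; copyBlob checks validationFailed as a
// belt-and-suspenders guard after PutBlob returns.
type digestingReader struct {
	source           io.Reader
	hash             hash.Hash
	expectedDigest   []byte
	validationFailed bool
}

func newDigestingReader(source io.Reader, expectedDigestString string) (*digestingReader, error) {
	hexPart := strings.TrimPrefix(expectedDigestString, "sha256:")
	if hexPart == expectedDigestString {
		return nil, fmt.Errorf("Invalid digest specification %s", expectedDigestString)
	}
	expected, err := hex.DecodeString(hexPart)
	if err != nil {
		return nil, fmt.Errorf("Invalid digest value %s: %v", expectedDigestString, err)
	}
	return &digestingReader{source: source, hash: sha256.New(), expectedDigest: expected}, nil
}

func (d *digestingReader) Read(p []byte) (int, error) {
	n, err := d.source.Read(p)
	if n > 0 {
		d.hash.Write(p[:n]) // hash.Hash.Write never returns an error
	}
	if err == io.EOF && !bytes.Equal(d.hash.Sum(nil), d.expectedDigest) {
		d.validationFailed = true
		return n, fmt.Errorf("Digest mismatch while reading blob")
	}
	return n, err
}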
vendor/github.com/containers/image/directory/directory_dest.go (generated, vendored; 24 lines changed)

@@ -40,16 +40,16 @@ func (d *dirImageDestination) SupportsSignatures() error {
 	return nil
 }

-// PutBlob writes contents of stream and returns its computed digest and size.
-// A digest can be optionally provided if known, the specific image destination can decide to play with it or not.
-// The length of stream is expected to be expectedSize; if expectedSize == -1, it is not known.
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dirImageDestination) PutBlob(stream io.Reader, digest string, expectedSize int64) (string, int64, error) {
+func (d *dirImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
 	blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
 	if err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	succeeded := false
 	defer func() {
@@ -64,24 +64,24 @@ func (d *dirImageDestination) PutBlob(stream io.Reader, digest string, expectedS

 	size, err := io.Copy(blobFile, tee)
 	if err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	computedDigest := hex.EncodeToString(h.Sum(nil))
-	if expectedSize != -1 && size != expectedSize {
-		return "", -1, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, expectedSize, size)
+	if inputInfo.Size != -1 && size != inputInfo.Size {
+		return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
 	}
 	if err := blobFile.Sync(); err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	if err := blobFile.Chmod(0644); err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	blobPath := d.ref.layerPath(computedDigest)
 	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	succeeded = true
-	return "sha256:" + computedDigest, size, nil
+	return types.BlobInfo{Digest: "sha256:" + computedDigest, Size: size}, nil
 }

 func (d *dirImageDestination) PutManifest(manifest []byte) error {
vendor/github.com/containers/image/docker/docker_image_dest.go (generated, vendored; 40 lines changed)

@@ -62,29 +62,29 @@ func (d *dockerImageDestination) SupportsSignatures() error {
 	return fmt.Errorf("Pushing signatures to a Docker Registry is not supported")
 }

-// PutBlob writes contents of stream and returns its computed digest and size.
-// A digest can be optionally provided if known, the specific image destination can decide to play with it or not.
-// The length of stream is expected to be expectedSize; if expectedSize == -1, it is not known.
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *dockerImageDestination) PutBlob(stream io.Reader, digest string, expectedSize int64) (string, int64, error) {
-	if digest != "" {
-		checkURL := fmt.Sprintf(blobsURL, d.ref.ref.RemoteName(), digest)
+func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
+	if inputInfo.Digest != "" {
+		checkURL := fmt.Sprintf(blobsURL, d.ref.ref.RemoteName(), inputInfo.Digest)

 		logrus.Debugf("Checking %s", checkURL)
 		res, err := d.c.makeRequest("HEAD", checkURL, nil, nil)
 		if err != nil {
-			return "", -1, err
+			return types.BlobInfo{}, err
 		}
 		defer res.Body.Close()
 		if res.StatusCode == http.StatusOK {
 			logrus.Debugf("... already exists, not uploading")
 			blobLength, err := strconv.ParseInt(res.Header.Get("Content-Length"), 10, 64)
 			if err != nil {
-				return "", -1, err
+				return types.BlobInfo{}, err
 			}
-			return digest, blobLength, nil
+			return types.BlobInfo{Digest: inputInfo.Digest, Size: blobLength}, nil
 		}
 		logrus.Debugf("... failed, status %d", res.StatusCode)
 	}
@@ -94,24 +94,24 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, digest string, expect
 	logrus.Debugf("Uploading %s", uploadURL)
 	res, err := d.c.makeRequest("POST", uploadURL, nil, nil)
 	if err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	defer res.Body.Close()
 	if res.StatusCode != http.StatusAccepted {
 		logrus.Debugf("Error initiating layer upload, response %#v", *res)
-		return "", -1, fmt.Errorf("Error initiating layer upload to %s, status %d", uploadURL, res.StatusCode)
+		return types.BlobInfo{}, fmt.Errorf("Error initiating layer upload to %s, status %d", uploadURL, res.StatusCode)
 	}
 	uploadLocation, err := res.Location()
 	if err != nil {
-		return "", -1, fmt.Errorf("Error determining upload URL: %s", err.Error())
+		return types.BlobInfo{}, fmt.Errorf("Error determining upload URL: %s", err.Error())
 	}

 	h := sha256.New()
 	tee := io.TeeReader(stream, h)
-	res, err = d.c.makeRequestToResolvedURL("PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, expectedSize)
+	res, err = d.c.makeRequestToResolvedURL("PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size)
 	if err != nil {
 		logrus.Debugf("Error uploading layer chunked, response %#v", *res)
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	defer res.Body.Close()
 	hash := h.Sum(nil)
@@ -119,27 +119,27 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, digest string, expect

 	uploadLocation, err = res.Location()
 	if err != nil {
-		return "", -1, fmt.Errorf("Error determining upload URL: %s", err.Error())
+		return types.BlobInfo{}, fmt.Errorf("Error determining upload URL: %s", err.Error())
 	}

 	// FIXME: DELETE uploadLocation on failure

 	locationQuery := uploadLocation.Query()
-	// TODO: check digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
+	// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
 	locationQuery.Set("digest", computedDigest)
 	uploadLocation.RawQuery = locationQuery.Encode()
 	res, err = d.c.makeRequestToResolvedURL("PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1)
 	if err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	defer res.Body.Close()
 	if res.StatusCode != http.StatusCreated {
 		logrus.Debugf("Error uploading layer, response %#v", *res)
-		return "", -1, fmt.Errorf("Error uploading layer to %s, status %d", uploadLocation, res.StatusCode)
+		return types.BlobInfo{}, fmt.Errorf("Error uploading layer to %s, status %d", uploadLocation, res.StatusCode)
 	}

-	logrus.Debugf("Upload of layer %s complete", digest)
-	return computedDigest, res.Request.ContentLength, nil
+	logrus.Debugf("Upload of layer %s complete", computedDigest)
+	return types.BlobInfo{Digest: computedDigest, Size: res.Request.ContentLength}, nil
 }

 func (d *dockerImageDestination) PutManifest(m []byte) error {
vendor/github.com/containers/image/docker/lookaside.go (generated, vendored; 10 lines changed)

@@ -34,8 +34,8 @@ type registryConfiguration struct {

 // registryNamespace defines lookaside locations for a single namespace.
 type registryNamespace struct {
-	SigStore      string `json:"sigstore"`       // For reading, and if SigStoreWrite is not present, for writing.
-	SigStoreWrite string `json:"sigstore-write"` // For writing only.
+	SigStore        string `json:"sigstore"`         // For reading, and if SigStoreStaging is not present, for writing.
+	SigStoreStaging string `json:"sigstore-staging"` // For writing only.
 }

 // signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage.
@@ -175,9 +175,9 @@ func (config *registryConfiguration) signatureTopLevel(ref dockerReference, writ
 // ns.signatureTopLevel returns an URL string configured in ns for ref, for write access if “write”.
 // or "" if nothing has been configured.
 func (ns registryNamespace) signatureTopLevel(write bool) string {
-	if write && ns.SigStoreWrite != "" {
-		logrus.Debugf(` Using %s`, ns.SigStoreWrite)
-		return ns.SigStoreWrite
+	if write && ns.SigStoreStaging != "" {
+		logrus.Debugf(` Using %s`, ns.SigStoreStaging)
+		return ns.SigStoreStaging
 	}
 	if ns.SigStore != "" {
 		logrus.Debugf(` Using %s`, ns.SigStore)
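The rename is user-visible: the per-namespace write location in the lookaside configuration moves from a sigstore-write key to sigstore-staging, matching the struct tags above. The following standalone snippet only illustrates how the renamed field is selected for writes; it re-declares the namespace struct locally with the same JSON tags, and the URLs in the sample configuration are invented, not taken from a real registries.d file.

package main

import (
	"encoding/json"
	"fmt"
)

// Same shape and tags as registryNamespace in lookaside.go (re-declared here for illustration).
type namespace struct {
	SigStore        string `json:"sigstore"`         // read; also used for writes if no staging URL is set
	SigStoreStaging string `json:"sigstore-staging"` // write only
}

func main() {
	raw := []byte(`{"sigstore": "https://sigs.example.com/read", "sigstore-staging": "file:///var/lib/example/sigstore"}`)
	var ns namespace
	if err := json.Unmarshal(raw, &ns); err != nil {
		panic(err)
	}
	// Mirrors registryNamespace.signatureTopLevel: writes prefer the staging URL.
	write := true
	base := ns.SigStore
	if write && ns.SigStoreStaging != "" {
		base = ns.SigStoreStaging
	}
	fmt.Println(base) // file:///var/lib/example/sigstore
}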
vendor/github.com/containers/image/image/docker_schema1.go (new file, generated, vendored; 140 lines)

@@ -0,0 +1,140 @@
package image

import (
	"encoding/json"
	"errors"
	"fmt"
	"regexp"

	"github.com/containers/image/types"
)

var (
	validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
)

type fsLayersSchema1 struct {
	BlobSum string `json:"blobSum"`
}

type manifestSchema1 struct {
	Name     string
	Tag      string
	FSLayers []fsLayersSchema1 `json:"fsLayers"`
	History  []struct {
		V1Compatibility string `json:"v1Compatibility"`
	} `json:"history"`
	// TODO(runcom) verify the downloaded manifest
	//Signature []byte `json:"signature"`
}

func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) {
	mschema1 := &manifestSchema1{}
	if err := json.Unmarshal(manifest, mschema1); err != nil {
		return nil, err
	}
	if err := fixManifestLayers(mschema1); err != nil {
		return nil, err
	}
	// TODO(runcom): verify manifest schema 1, 2 etc
	//if len(m.FSLayers) != len(m.History) {
	//return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String())
	//}
	//if len(m.FSLayers) == 0 {
	//return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
	//}
	return mschema1, nil
}

func (m *manifestSchema1) ConfigInfo() types.BlobInfo {
	return types.BlobInfo{}
}

func (m *manifestSchema1) LayerInfos() []types.BlobInfo {
	layers := make([]types.BlobInfo, len(m.FSLayers))
	for i, layer := range m.FSLayers {
		layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1}
	}
	return layers
}

func (m *manifestSchema1) Config() ([]byte, error) {
	return []byte(m.History[0].V1Compatibility), nil
}

func (m *manifestSchema1) ImageInspectInfo() (*types.ImageInspectInfo, error) {
	v1 := &v1Image{}
	config, err := m.Config()
	if err != nil {
		return nil, err
	}
	if err := json.Unmarshal(config, v1); err != nil {
		return nil, err
	}
	return &types.ImageInspectInfo{
		Tag:           m.Tag,
		DockerVersion: v1.DockerVersion,
		Created:       v1.Created,
		Labels:        v1.Config.Labels,
		Architecture:  v1.Architecture,
		Os:            v1.OS,
	}, nil
}

// fixManifestLayers, after validating the supplied manifest
// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History),
// modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates,
// both from manifest.History and manifest.FSLayers).
// Note that even after this succeeds, manifest.FSLayers may contain duplicate entries
// (for Dockerfile operations which change the configuration but not the filesystem).
func fixManifestLayers(manifest *manifestSchema1) error {
	type imageV1 struct {
		ID     string
		Parent string
	}
	// Per the specification, we can assume that len(manifest.FSLayers) == len(manifest.History)
	imgs := make([]*imageV1, len(manifest.FSLayers))
	for i := range manifest.FSLayers {
		img := &imageV1{}

		if err := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), img); err != nil {
			return err
		}

		imgs[i] = img
		if err := validateV1ID(img.ID); err != nil {
			return err
		}
	}
	if imgs[len(imgs)-1].Parent != "" {
		return errors.New("Invalid parent ID in the base layer of the image.")
	}
	// check general duplicates to error instead of a deadlock
	idmap := make(map[string]struct{})
	var lastID string
	for _, img := range imgs {
		// skip IDs that appear after each other, we handle those later
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
		}
		lastID = img.ID
		idmap[lastID] = struct{}{}
	}
	// backwards loop so that we keep the remaining indexes after removing items
	for i := len(imgs) - 2; i >= 0; i-- {
		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
			manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...)
			manifest.History = append(manifest.History[:i], manifest.History[i+1:]...)
		} else if imgs[i].Parent != imgs[i+1].ID {
			return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent)
		}
	}
	return nil
}

func validateV1ID(id string) error {
	if ok := validHex.MatchString(id); !ok {
		return fmt.Errorf("image ID %q is invalid", id)
	}
	return nil
}
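One detail worth noting in LayerInfos above: schema1 fsLayers are listed newest layer first, so the index arithmetic layers[(len(m.FSLayers)-1)-i] reverses them into root-first order, and every Size is -1 because schema1 manifests carry no blob sizes. A tiny standalone illustration of that reversal (the digests are made up, and the struct is a local stand-in for types.BlobInfo):

package main

import "fmt"

type blobInfo struct {
	Digest string
	Size   int64
}

func main() {
	// fsLayers as they appear in a schema1 manifest: newest layer first.
	fsLayers := []string{"sha256:cccc", "sha256:bbbb", "sha256:aaaa"}

	// Same index arithmetic as manifestSchema1.LayerInfos: reverse into root-first order.
	layers := make([]blobInfo, len(fsLayers))
	for i, blobSum := range fsLayers {
		layers[(len(fsLayers)-1)-i] = blobInfo{Digest: blobSum, Size: -1}
	}
	fmt.Println(layers) // [{sha256:aaaa -1} {sha256:bbbb -1} {sha256:cccc -1}]
}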
vendor/github.com/containers/image/image/docker_schema2.go (new file, generated, vendored; 68 lines)

@@ -0,0 +1,68 @@
package image

import (
	"encoding/json"
	"io/ioutil"

	"github.com/containers/image/types"
)

type descriptor struct {
	MediaType string `json:"mediaType"`
	Size      int64  `json:"size"`
	Digest    string `json:"digest"`
}

type manifestSchema2 struct {
	src               types.ImageSource
	ConfigDescriptor  descriptor   `json:"config"`
	LayersDescriptors []descriptor `json:"layers"`
}

func manifestSchema2FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) {
	v2s2 := manifestSchema2{src: src}
	if err := json.Unmarshal(manifest, &v2s2); err != nil {
		return nil, err
	}
	return &v2s2, nil
}

func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
	return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size}
}

func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
	blobs := []types.BlobInfo{}
	for _, layer := range m.LayersDescriptors {
		blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size})
	}
	return blobs
}

func (m *manifestSchema2) Config() ([]byte, error) {
	rawConfig, _, err := m.src.GetBlob(m.ConfigDescriptor.Digest)
	if err != nil {
		return nil, err
	}
	config, err := ioutil.ReadAll(rawConfig)
	rawConfig.Close()
	return config, err
}

func (m *manifestSchema2) ImageInspectInfo() (*types.ImageInspectInfo, error) {
	config, err := m.Config()
	if err != nil {
		return nil, err
	}
	v1 := &v1Image{}
	if err := json.Unmarshal(config, v1); err != nil {
		return nil, err
	}
	return &types.ImageInspectInfo{
		DockerVersion: v1.DockerVersion,
		Created:       v1.Created,
		Labels:        v1.Config.Labels,
		Architecture:  v1.Architecture,
		Os:            v1.OS,
	}, nil
}
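For context, the JSON this parser consumes is a Docker schema2 manifest, whose config entry and each layers entry carry mediaType, size, and digest, matching the descriptor struct above. A standalone sketch of the parsing follows; the structs are re-declared locally and the digest values in the sample manifest are invented, while the media type strings are the standard schema2 ones.

package main

import (
	"encoding/json"
	"fmt"
)

// Locally re-declared copies of the shapes from docker_schema2.go, for illustration.
type descriptor struct {
	MediaType string `json:"mediaType"`
	Size      int64  `json:"size"`
	Digest    string `json:"digest"`
}

type manifestSchema2 struct {
	ConfigDescriptor  descriptor   `json:"config"`
	LayersDescriptors []descriptor `json:"layers"`
}

func main() {
	manblob := []byte(`{
	  "schemaVersion": 2,
	  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
	  "config": {"mediaType": "application/vnd.docker.container.image.v1+json", "size": 1278, "digest": "sha256:1111"},
	  "layers": [
	    {"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "size": 667590, "digest": "sha256:2222"}
	  ]
	}`)
	var m manifestSchema2
	if err := json.Unmarshal(manblob, &m); err != nil {
		panic(err)
	}
	fmt.Println(m.ConfigDescriptor.Digest, len(m.LayersDescriptors)) // sha256:1111 1
}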
vendor/github.com/containers/image/image/image.go (generated, vendored; 258 lines changed)

@@ -4,21 +4,14 @@
 package image

 import (
-	"encoding/json"
 	"errors"
 	"fmt"
-	"io/ioutil"
-	"regexp"
 	"time"

 	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
 )

-var (
-	validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
-)
-
 // genericImage is a general set of utilities for working with container images,
 // whatever is their underlying location (i.e. dockerImageSource-independent).
 // Note the existence of skopeo/docker.Image: some instances of a `types.Image`
@@ -59,7 +52,7 @@ func (i *genericImage) Close() {
 }

 // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
-// NOTE: It is essential for signature verification that Manifest returns the manifest from which BlobDigests is computed.
+// NOTE: It is essential for signature verification that Manifest returns the manifest from which ConfigInfo and LayerInfos is computed.
 func (i *genericImage) Manifest() ([]byte, string, error) {
 	if i.cachedManifest == nil {
 		m, mt, err := i.src.GetManifest()
@@ -92,15 +85,6 @@ func (i *genericImage) Signatures() ([][]byte, error) {
 	return i.cachedSignatures, nil
 }

-func (i *genericImage) Inspect() (*types.ImageInspectInfo, error) {
-	// TODO(runcom): unused version param for now, default to docker v2-1
-	m, err := i.getParsedManifest()
-	if err != nil {
-		return nil, err
-	}
-	return m.ImageInspectInfo()
-}
-
 type config struct {
 	Labels map[string]string
 }
@@ -121,121 +105,9 @@ type v1Image struct {
 // will support v1 one day...
 type genericManifest interface {
 	Config() ([]byte, error)
-	LayerDigests() []string
-	BlobDigests() []string
-	ImageInspectInfo() (*types.ImageInspectInfo, error)
-}
-
-type fsLayersSchema1 struct {
-	BlobSum string `json:"blobSum"`
-}
-
-// compile-time check that manifestSchema1 implements genericManifest
-var _ genericManifest = (*manifestSchema1)(nil)
-
-type manifestSchema1 struct {
-	Name     string
-	Tag      string
-	FSLayers []fsLayersSchema1 `json:"fsLayers"`
-	History  []struct {
-		V1Compatibility string `json:"v1Compatibility"`
-	} `json:"history"`
-	// TODO(runcom) verify the downloaded manifest
-	//Signature []byte `json:"signature"`
-}
-
-func (m *manifestSchema1) LayerDigests() []string {
-	layers := make([]string, len(m.FSLayers))
-	for i, layer := range m.FSLayers {
-		layers[i] = layer.BlobSum
-	}
-	return layers
-}
-
-func (m *manifestSchema1) BlobDigests() []string {
-	return m.LayerDigests()
-}
-
-func (m *manifestSchema1) Config() ([]byte, error) {
-	return []byte(m.History[0].V1Compatibility), nil
-}
-
-func (m *manifestSchema1) ImageInspectInfo() (*types.ImageInspectInfo, error) {
-	v1 := &v1Image{}
-	config, err := m.Config()
-	if err != nil {
-		return nil, err
-	}
-	if err := json.Unmarshal(config, v1); err != nil {
-		return nil, err
-	}
-	return &types.ImageInspectInfo{
-		Tag:           m.Tag,
-		DockerVersion: v1.DockerVersion,
-		Created:       v1.Created,
-		Labels:        v1.Config.Labels,
-		Architecture:  v1.Architecture,
-		Os:            v1.OS,
-		Layers:        m.LayerDigests(),
-	}, nil
-}
-
-// compile-time check that manifestSchema2 implements genericManifest
-var _ genericManifest = (*manifestSchema2)(nil)
-
-type manifestSchema2 struct {
-	src               types.ImageSource
-	ConfigDescriptor  descriptor   `json:"config"`
-	LayersDescriptors []descriptor `json:"layers"`
-}
-
-type descriptor struct {
-	MediaType string `json:"mediaType"`
-	Size      int64  `json:"size"`
-	Digest    string `json:"digest"`
-}
-
-func (m *manifestSchema2) LayerDigests() []string {
-	blobs := []string{}
-	for _, layer := range m.LayersDescriptors {
-		blobs = append(blobs, layer.Digest)
-	}
-	return blobs
-}
-
-func (m *manifestSchema2) BlobDigests() []string {
-	blobs := m.LayerDigests()
-	blobs = append(blobs, m.ConfigDescriptor.Digest)
-	return blobs
-}
-
-func (m *manifestSchema2) Config() ([]byte, error) {
-	rawConfig, _, err := m.src.GetBlob(m.ConfigDescriptor.Digest)
-	if err != nil {
-		return nil, err
-	}
-	config, err := ioutil.ReadAll(rawConfig)
-	rawConfig.Close()
-	return config, err
-}
-
-func (m *manifestSchema2) ImageInspectInfo() (*types.ImageInspectInfo, error) {
-	config, err := m.Config()
-	if err != nil {
-		return nil, err
-	}
-	v1 := &v1Image{}
-	if err := json.Unmarshal(config, v1); err != nil {
-		return nil, err
-	}
-	return &types.ImageInspectInfo{
-		DockerVersion: v1.DockerVersion,
-		Created:       v1.Created,
-		Labels:        v1.Config.Labels,
-		Architecture:  v1.Architecture,
-		Os:            v1.OS,
-		Layers:        m.LayerDigests(),
-	}, nil
+	ConfigInfo() types.BlobInfo
+	LayerInfos() []types.BlobInfo
+	ImageInspectInfo() (*types.ImageInspectInfo, error) // The caller will need to fill in Layers
 }

 // getParsedManifest parses the manifest into a data structure, cleans it up, and returns it.
@@ -252,27 +124,9 @@ func (i *genericImage) getParsedManifest() (genericManifest, error) {
 	// This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
 	// need to happen within the ImageSource.
 	case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, "application/json":
-		mschema1 := &manifestSchema1{}
-		if err := json.Unmarshal(manblob, mschema1); err != nil {
-			return nil, err
-		}
-		if err := fixManifestLayers(mschema1); err != nil {
-			return nil, err
-		}
-		// TODO(runcom): verify manifest schema 1, 2 etc
-		//if len(m.FSLayers) != len(m.History) {
-		//return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String())
-		//}
-		//if len(m.FSLayers) == 0 {
-		//return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String())
-		//}
-		return mschema1, nil
+		return manifestSchema1FromManifest(manblob)
 	case manifest.DockerV2Schema2MediaType:
-		v2s2 := manifestSchema2{src: i.src}
-		if err := json.Unmarshal(manblob, &v2s2); err != nil {
-			return nil, err
-		}
-		return &v2s2, nil
+		return manifestSchema2FromManifest(i.src, manblob)
 	case "":
 		return nil, errors.New("could not guess manifest media type")
 	default:
@@ -280,86 +134,42 @@ func (i *genericImage) getParsedManifest() (genericManifest, error) {
 	}
 }

-// uniqueBlobDigests returns a list of blob digests referenced from a manifest.
-// The list will not contain duplicates; it is not intended to correspond to the "history" or "parent chain" of a Docker image.
-func uniqueBlobDigests(m genericManifest) []string {
-	var res []string
-	seen := make(map[string]struct{})
-	for _, digest := range m.BlobDigests() {
-		if _, ok := seen[digest]; ok {
-			continue
-		}
-		seen[digest] = struct{}{}
-		res = append(res, digest)
-	}
-	return res
-}
-
-// BlobDigests returns a list of blob digests referenced by this image.
-// The list will not contain duplicates; it is not intended to correspond to the "history" or "parent chain" of a Docker image.
-// NOTE: It is essential for signature verification that BlobDigests is computed from the same manifest which is returned by Manifest().
-func (i *genericImage) BlobDigests() ([]string, error) {
+func (i *genericImage) Inspect() (*types.ImageInspectInfo, error) {
+	// TODO(runcom): unused version param for now, default to docker v2-1
 	m, err := i.getParsedManifest()
 	if err != nil {
 		return nil, err
 	}
-	return uniqueBlobDigests(m), nil
+	info, err := m.ImageInspectInfo()
+	if err != nil {
+		return nil, err
+	}
+	layers := m.LayerInfos()
+	info.Layers = make([]string, len(layers))
+	for i, layer := range layers {
+		info.Layers[i] = layer.Digest
+	}
+	return info, nil
 }

-// fixManifestLayers, after validating the supplied manifest
-// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History),
-// modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates,
-// both from manifest.History and manifest.FSLayers).
-// Note that even after this succeeds, manifest.FSLayers may contain duplicate entries
-// (for Dockerfile operations which change the configuration but not the filesystem).
-func fixManifestLayers(manifest *manifestSchema1) error {
-	type imageV1 struct {
-		ID     string
-		Parent string
-	}
-	// Per the specification, we can assume that len(manifest.FSLayers) == len(manifest.History)
-	imgs := make([]*imageV1, len(manifest.FSLayers))
-	for i := range manifest.FSLayers {
-		img := &imageV1{}
-
-		if err := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), img); err != nil {
-			return err
-		}
-
-		imgs[i] = img
-		if err := validateV1ID(img.ID); err != nil {
-			return err
-		}
-	}
-	if imgs[len(imgs)-1].Parent != "" {
-		return errors.New("Invalid parent ID in the base layer of the image.")
-	}
-	// check general duplicates to error instead of a deadlock
-	idmap := make(map[string]struct{})
-	var lastID string
-	for _, img := range imgs {
-		// skip IDs that appear after each other, we handle those later
-		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
-			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
-		}
-		lastID = img.ID
-		idmap[lastID] = struct{}{}
-	}
-	// backwards loop so that we keep the remaining indexes after removing items
-	for i := len(imgs) - 2; i >= 0; i-- {
-		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
-			manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...)
-			manifest.History = append(manifest.History[:i], manifest.History[i+1:]...)
-		} else if imgs[i].Parent != imgs[i+1].ID {
-			return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent)
-		}
-	}
-	return nil
-}
-
-func validateV1ID(id string) error {
-	if ok := validHex.MatchString(id); !ok {
-		return fmt.Errorf("image ID %q is invalid", id)
-	}
-	return nil
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// NOTE: It is essential for signature verification that ConfigInfo is computed from the same manifest which is returned by Manifest().
+func (i *genericImage) ConfigInfo() (types.BlobInfo, error) {
+	m, err := i.getParsedManifest()
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	return m.ConfigInfo(), nil
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// NOTE: It is essential for signature verification that LayerInfos is computed from the same manifest which is returned by Manifest().
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (i *genericImage) LayerInfos() ([]types.BlobInfo, error) {
+	m, err := i.getParsedManifest()
+	if err != nil {
+		return nil, err
+	}
+	return m.LayerInfos(), nil
 }
vendor/github.com/containers/image/oci/layout/oci_dest.go (generated, vendored; 30 lines changed)

@@ -49,19 +49,19 @@ func (d *ociImageDestination) SupportsSignatures() error {
 	return fmt.Errorf("Pushing signatures for OCI images is not supported")
 }

-// PutBlob writes contents of stream and returns its computed digest and size.
-// A digest can be optionally provided if known, the specific image destination can decide to play with it or not.
-// The length of stream is expected to be expectedSize; if expectedSize == -1, it is not known.
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *ociImageDestination) PutBlob(stream io.Reader, _ string, expectedSize int64) (string, int64, error) {
+func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
 	if err := ensureDirectoryExists(d.ref.dir); err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
 	if err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	succeeded := false
 	defer func() {
@@ -76,31 +76,31 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, _ string, expectedSize i

 	size, err := io.Copy(blobFile, tee)
 	if err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	computedDigest := "sha256:" + hex.EncodeToString(h.Sum(nil))
-	if expectedSize != -1 && size != expectedSize {
-		return "", -1, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, expectedSize, size)
+	if inputInfo.Size != -1 && size != inputInfo.Size {
+		return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", computedDigest, inputInfo.Size, size)
 	}
 	if err := blobFile.Sync(); err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	if err := blobFile.Chmod(0644); err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}

 	blobPath, err := d.ref.blobPath(computedDigest)
 	if err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	if err := ensureParentDirectoryExists(blobPath); err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
-		return "", -1, err
+		return types.BlobInfo{}, err
 	}
 	succeeded = true
-	return computedDigest, size, nil
+	return types.BlobInfo{Digest: computedDigest, Size: size}, nil
 }

 func createManifest(m []byte) ([]byte, string, error) {
vendor/github.com/containers/image/openshift/openshift-copies.go (generated, vendored; 9 lines changed)

@@ -950,7 +950,8 @@ func (m *clustersMap) UnmarshalJSON(data []byte) error {
 		return err
 	}
 	for _, e := range a {
-		(*m)[e.Name] = &e.Cluster
+		cluster := e.Cluster // Allocates a new instance in each iteration
+		(*m)[e.Name] = &cluster
 	}
 	return nil
 }
@@ -963,7 +964,8 @@ func (m *authInfosMap) UnmarshalJSON(data []byte) error {
 		return err
 	}
 	for _, e := range a {
-		(*m)[e.Name] = &e.AuthInfo
+		authInfo := e.AuthInfo // Allocates a new instance in each iteration
+		(*m)[e.Name] = &authInfo
 	}
 	return nil
 }
@@ -976,7 +978,8 func (m *contextsMap) UnmarshalJSON(data []byte) error {
 		return err
 	}
 	for _, e := range a {
-		(*m)[e.Name] = &e.Context
+		context := e.Context // Allocates a new instance in each iteration
+		(*m)[e.Name] = &context
 	}
 	return nil
 }
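The openshift-copies.go change above fixes a classic Go pitfall: the map values were built from &e.Cluster (and friends), i.e. the address of a field of the range loop variable, so every map entry ended up pointing at the same memory and held whatever the last iteration wrote. Copying into a fresh local first gives each entry its own instance. A minimal standalone reproduction of the difference, under Go loop semantics before 1.22 (which is what applies to this vendored code; the struct and data here are invented):

package main

import "fmt"

type entry struct {
	Name  string
	Value int
}

func main() {
	items := []entry{{"a", 1}, {"b", 2}, {"c", 3}}

	// Buggy pattern (pre-Go 1.22): &e.Value aliases the single loop variable.
	buggy := map[string]*int{}
	for _, e := range items {
		buggy[e.Name] = &e.Value
	}
	fmt.Println(*buggy["a"], *buggy["b"], *buggy["c"]) // 3 3 3

	// Fixed pattern, as in this commit: copy into a fresh variable each iteration.
	fixed := map[string]*int{}
	for _, e := range items {
		v := e.Value // allocates a new instance in each iteration
		fixed[e.Name] = &v
	}
	fmt.Println(*fixed["a"], *fixed["b"], *fixed["c"]) // 1 2 3
}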
vendor/github.com/containers/image/openshift/openshift.go (generated, vendored; 10 lines changed)

@@ -337,14 +337,14 @@ func (d *openshiftImageDestination) SupportsSignatures() error {
 	return nil
 }

-// PutBlob writes contents of stream and returns its computed digest and size.
-// A digest can be optionally provided if known, the specific image destination can decide to play with it or not.
-// The length of stream is expected to be expectedSize; if expectedSize == -1, it is not known.
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
 // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 // to any other readers for download using the supplied digest.
 // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *openshiftImageDestination) PutBlob(stream io.Reader, digest string, expectedSize int64) (string, int64, error) {
-	return d.docker.PutBlob(stream, digest, expectedSize)
+func (d *openshiftImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
+	return d.docker.PutBlob(stream, inputInfo)
 }

 func (d *openshiftImageDestination) PutManifest(m []byte) error {
vendor/github.com/containers/image/types/types.go (generated, vendored; 29 lines changed)

@@ -86,6 +86,13 @@ type ImageReference interface {
 	DeleteImage(ctx *SystemContext) error
 }

+// BlobInfo collects known information about a blob (layer/config).
+// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that.
+type BlobInfo struct {
+	Digest string // "" if unknown.
+	Size   int64  // -1 if unknown
+}
+
 // ImageSource is a service, possibly remote (= slow), to download components of a single image.
 // This is primarily useful for copying images around; for examining their properties, Image (below)
 // is usually more useful.
@@ -127,13 +134,13 @@ type ImageDestination interface {
 	// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
 	SupportsSignatures() error

-	// PutBlob writes contents of stream and returns its computed digest and size.
-	// A digest can be optionally provided if known, the specific image destination can decide to play with it or not.
-	// The length of stream is expected to be expectedSize; if expectedSize == -1, it is not known.
+	// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+	// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+	// inputInfo.Size is the expected length of stream, if known.
 	// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
 	// to any other readers for download using the supplied digest.
 	// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-	PutBlob(stream io.Reader, digest string, expectedSize int64) (string, int64, error)
+	PutBlob(stream io.Reader, inputInfo BlobInfo) (BlobInfo, error)
 	// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
 	PutManifest([]byte) error
 	PutSignatures(signatures [][]byte) error
@@ -154,14 +161,18 @@ type Image interface {
 	Close()
 	// ref to repository?
 	// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
-	// NOTE: It is essential for signature verification that Manifest returns the manifest from which BlobDigests is computed.
+	// NOTE: It is essential for signature verification that Manifest returns the manifest from which ConfigInfo and LayerInfos is computed.
 	Manifest() ([]byte, string, error)
 	// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
 	Signatures() ([][]byte, error)
-	// BlobDigests returns a list of blob digests referenced by this image.
-	// The list will not contain duplicates; it is not intended to correspond to the "history" or "parent chain" of a Docker image.
-	// NOTE: It is essential for signature verification that BlobDigests is computed from the same manifest which is returned by Manifest().
-	BlobDigests() ([]string, error)
+	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+	// NOTE: It is essential for signature verification that ConfigInfo is computed from the same manifest which is returned by Manifest().
+	ConfigInfo() (BlobInfo, error)
+	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+	// The Digest field is guaranteed to be provided; Size may be -1.
+	// NOTE: It is essential for signature verification that LayerInfos is computed from the same manifest which is returned by Manifest().
+	// WARNING: The list may contain duplicates, and they are semantically relevant.
+	LayerInfos() ([]BlobInfo, error)
 	// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
 	Inspect() (*ImageInspectInfo, error)
 }
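Taken together, the BlobInfo conventions above (Digest "" when unknown, Size -1 when unknown) mean a caller can push a blob it has not hashed or measured yet and rely on the destination to fill both fields in. A short sketch of such a call against the new interface; the helper name, file handling, and printed message are placeholders, not part of the vendored code.

package example // illustration only

import (
	"fmt"
	"os"

	"github.com/containers/image/types"
)

// uploadFile streams a local file into an ImageDestination without knowing the
// blob digest up front; per the interface above, the destination computes and
// returns the final Digest and Size in the resulting BlobInfo.
func uploadFile(dest types.ImageDestination, path string) (types.BlobInfo, error) {
	f, err := os.Open(path)
	if err != nil {
		return types.BlobInfo{}, err
	}
	defer f.Close()

	// Digest "" = unknown, Size -1 = unknown; both are legal per the BlobInfo comments.
	info, err := dest.PutBlob(f, types.BlobInfo{Digest: "", Size: -1})
	if err != nil {
		return types.BlobInfo{}, err
	}
	fmt.Printf("uploaded %s (%d bytes)\n", info.Digest, info.Size)
	return info, nil
}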