Mirror of https://github.com/containers/skopeo.git (synced 2025-09-18 16:08:59 +00:00)

Commit: Vendor after merging mtrmac/image:compress
vendor/github.com/containers/image/copy/copy.go (generated, vendored): 160 changed lines
@@ -1,14 +1,19 @@
 package copy
 
 import (
+	"bytes"
+	"compress/gzip"
 	"crypto/sha256"
 	"crypto/subtle"
 	"encoding/hex"
+	"errors"
 	"fmt"
 	"hash"
 	"io"
+	"reflect"
 	"strings"
 
+	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/image"
 	"github.com/containers/image/signature"
 	"github.com/containers/image/transports"
@@ -120,27 +125,52 @@ func Image(ctx *types.SystemContext, policyContext *signature.PolicyContext, des
 			return fmt.Errorf("Can not copy signatures: %v", err)
 		}
 	}
+	canModifyManifest := len(sigs) == 0
 
-	configInfo, err := src.ConfigInfo()
+	srcConfigInfo, err := src.ConfigInfo()
 	if err != nil {
 		return fmt.Errorf("Error parsing manifest: %v", err)
 	}
-	if configInfo.Digest != "" {
-		if err := copyBlob(dest, rawSource, configInfo.Digest); err != nil {
+	if srcConfigInfo.Digest != "" {
+		destConfigInfo, err := copyBlob(dest, rawSource, srcConfigInfo, false)
+		if err != nil {
 			return err
 		}
+		if destConfigInfo.Digest != srcConfigInfo.Digest {
+			return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcConfigInfo.Digest, destConfigInfo.Digest)
+		}
 	}
-	layerInfos, err := src.LayerInfos()
+
+	srcLayerInfos, err := src.LayerInfos()
 	if err != nil {
 		return fmt.Errorf("Error parsing manifest: %v", err)
 	}
-	copiedLayers := map[string]struct{}{}
-	for _, info := range layerInfos {
-		if _, ok := copiedLayers[info.Digest]; !ok {
-			if err := copyBlob(dest, rawSource, info.Digest); err != nil {
+	destLayerInfos := []types.BlobInfo{}
+	copiedLayers := map[string]types.BlobInfo{}
+	for _, srcLayer := range srcLayerInfos {
+		destLayer, ok := copiedLayers[srcLayer.Digest]
+		if !ok {
+			destLayer, err = copyBlob(dest, rawSource, srcLayer, canModifyManifest)
+			if err != nil {
 				return err
 			}
-			copiedLayers[info.Digest] = struct{}{}
+			copiedLayers[srcLayer.Digest] = destLayer
 		}
+		destLayerInfos = append(destLayerInfos, destLayer)
 	}
+
+	manifestUpdates := types.ManifestUpdateOptions{}
+	if layerDigestsDiffer(srcLayerInfos, destLayerInfos) {
+		manifestUpdates.LayerInfos = destLayerInfos
+	}
+
+	if !reflect.DeepEqual(manifestUpdates, types.ManifestUpdateOptions{}) {
+		if !canModifyManifest {
+			return fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden")
+		}
+		manifest, err = src.UpdatedManifest(manifestUpdates)
+		if err != nil {
+			return fmt.Errorf("Error creating an updated manifest: %v", err)
+		}
+	}
 
@@ -176,27 +206,117 @@ func Image(ctx *types.SystemContext, policyContext *signature.PolicyContext, des
 	return nil
 }
 
-func copyBlob(dest types.ImageDestination, src types.ImageSource, digest string) error {
-	stream, blobSize, err := src.GetBlob(digest)
-	if err != nil {
-		return fmt.Errorf("Error reading blob %s: %v", digest, err)
+// layerDigestsDiffer return true iff the digests in a and b differ (ignoring sizes and possible other fields)
+func layerDigestsDiffer(a, b []types.BlobInfo) bool {
+	if len(a) != len(b) {
+		return true
 	}
-	defer stream.Close()
+	for i := range a {
+		if a[i].Digest != b[i].Digest {
+			return true
+		}
+	}
+	return false
+}
 
+// copyBlob copies a blob with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
+// and returns a complete blobInfo of the copied blob.
+func copyBlob(dest types.ImageDestination, src types.ImageSource, srcInfo types.BlobInfo, canCompress bool) (types.BlobInfo, error) {
+	srcStream, srcBlobSize, err := src.GetBlob(srcInfo.Digest) // We currently completely ignore srcInfo.Size throughout.
+	if err != nil {
+		return types.BlobInfo{}, fmt.Errorf("Error reading blob %s: %v", srcInfo.Digest, err)
+	}
+	defer srcStream.Close()
+
 	// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
 	// use a separate validation failure indicator.
 	// Note that we don't use a stronger "validationSucceeded" indicator, because
 	// dest.PutBlob may detect that the layer already exists, in which case we don't
 	// read stream to the end, and validation does not happen.
-	digestingReader, err := newDigestingReader(stream, digest)
+	digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
 	if err != nil {
-		return fmt.Errorf("Error preparing to verify blob %s: %v", digest, err)
+		return types.BlobInfo{}, fmt.Errorf("Error preparing to verify blob %s: %v", srcInfo.Digest, err)
 	}
-	if _, err := dest.PutBlob(digestingReader, types.BlobInfo{Digest: digest, Size: blobSize}); err != nil {
-		return fmt.Errorf("Error writing blob: %v", err)
+	var destStream io.Reader = digestingReader
+	isCompressed, destStream, err := isStreamCompressed(destStream) // We could skip this in some cases, but let's keep the code path uniform
+	if err != nil {
+		return types.BlobInfo{}, fmt.Errorf("Error reading blob %s: %v", srcInfo.Digest, err)
 	}
+
+	var inputInfo types.BlobInfo
+	if !canCompress || isCompressed || !dest.ShouldCompressLayers() {
+		logrus.Debugf("Using original blob without modification")
+		inputInfo.Digest = srcInfo.Digest
+		inputInfo.Size = srcBlobSize
+	} else {
+		logrus.Debugf("Compressing blob on the fly")
+		pipeReader, pipeWriter := io.Pipe()
+		defer pipeReader.Close()
+
+		// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
+		// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
+		// we don’t care.
+		go compressGoroutine(pipeWriter, destStream) // Closes pipeWriter
+		destStream = pipeReader
+		inputInfo.Digest = ""
+		inputInfo.Size = -1
+	}
+	uploadedInfo, err := dest.PutBlob(destStream, inputInfo)
+	if err != nil {
+		return types.BlobInfo{}, fmt.Errorf("Error writing blob: %v", err)
+	}
 	if digestingReader.validationFailed { // Coverage: This should never happen.
-		return fmt.Errorf("Internal error uploading blob %s, digest verification failed but was ignored", digest)
+		return types.BlobInfo{}, fmt.Errorf("Internal error uploading blob %s, digest verification failed but was ignored", srcInfo.Digest)
 	}
-	return nil
+	if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
+		return types.BlobInfo{}, fmt.Errorf("Internal error uploading blob %s, blob with digest %s uploaded with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
+	}
+	return uploadedInfo, nil
 }
+
+// compressionPrefixes is an internal implementation detail of isStreamCompressed
+var compressionPrefixes = map[string][]byte{
+	"gzip":  {0x1F, 0x8B, 0x08},                   // gzip (RFC 1952)
+	"bzip2": {0x42, 0x5A, 0x68},                   // bzip2 (decompress.c:BZ2_decompress)
+	"xz":    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // xz (/usr/share/doc/xz/xz-file-format.txt)
+}
+
+// isStreamCompressed returns true if input is recognized as a compressed format.
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
+func isStreamCompressed(input io.Reader) (bool, io.Reader, error) {
+	buffer := [8]byte{}
+
+	n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
+	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+		// This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again.
+		// Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
+		return false, nil, err
+	}
+
+	isCompressed := false
+	for algo, prefix := range compressionPrefixes {
+		if bytes.HasPrefix(buffer[:n], prefix) {
+			logrus.Debugf("Detected compression format %s", algo)
+			isCompressed = true
+			break
+		}
+	}
+	if !isCompressed {
+		logrus.Debugf("No compression detected")
+	}
+
+	return isCompressed, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
+}
+
+// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
+func compressGoroutine(dest *io.PipeWriter, src io.Reader) {
+	err := errors.New("Internal error: unexpected panic in compressGoroutine")
+	defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
+		dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
+	}()
+
+	zipper := gzip.NewWriter(dest)
+	defer zipper.Close()
+
+	_, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close()
+}
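The interesting part of the copy.go change is the pair of helpers at the bottom of the diff: isStreamCompressed sniffs magic bytes and hands back a reassembled reader, and compressGoroutine gzips into one end of an io.Pipe while PutBlob reads from the other. Below is a minimal standalone sketch of those two patterns; the names sniffGzip and gzipStream are illustrative and not part of the vendored code.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

var gzipPrefix = []byte{0x1F, 0x8B, 0x08} // gzip magic (RFC 1952)

// sniffGzip peeks at the start of input; callers must keep reading from the returned reader,
// which re-prepends the sniffed bytes via io.MultiReader.
func sniffGzip(input io.Reader) (bool, io.Reader, error) {
	buf := make([]byte, 3)
	n, err := io.ReadAtLeast(input, buf, len(buf))
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		return false, nil, err
	}
	return bytes.HasPrefix(buf[:n], gzipPrefix), io.MultiReader(bytes.NewReader(buf[:n]), input), nil
}

// gzipStream returns a reader producing the gzip-compressed form of src,
// compressing in a goroutine through an io.Pipe so the whole blob never sits in memory.
func gzipStream(src io.Reader) io.Reader {
	pr, pw := io.Pipe()
	go func() {
		zw := gzip.NewWriter(pw)
		_, err := io.Copy(zw, src)
		if closeErr := zw.Close(); err == nil {
			err = closeErr
		}
		pw.CloseWithError(err) // CloseWithError(nil) behaves like Close()
	}()
	return pr
}

func main() {
	plain := strings.NewReader("some layer data")
	compressed, _ := ioutil.ReadAll(gzipStream(plain))
	isGzip, _, _ := sniffGzip(bytes.NewReader(compressed))
	fmt.Println(len(compressed), isGzip) // the compressed stream is detected as gzip
}

The pipe keeps memory usage bounded: the producer goroutine blocks until the consumer reads, and closing the writer with the copy error propagates failures to the reader side.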
vendor/github.com/containers/image/directory/directory_dest.go (generated, vendored): 5 changed lines
@@ -40,6 +40,11 @@ func (d *dirImageDestination) SupportsSignatures() error {
 	return nil
 }
 
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *dirImageDestination) ShouldCompressLayers() bool {
+	return false
+}
+
 // PutBlob writes contents of stream and returns data representing the result (with all data filled in).
 // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
 // inputInfo.Size is the expected length of stream, if known.
vendor/github.com/containers/image/docker/docker_image_dest.go (generated, vendored): 18 changed lines
@@ -62,6 +62,19 @@ func (d *dockerImageDestination) SupportsSignatures() error {
 	return fmt.Errorf("Pushing signatures to a Docker Registry is not supported")
 }
 
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *dockerImageDestination) ShouldCompressLayers() bool {
+	return true
+}
+
+// sizeCounter is an io.Writer which only counts the total size of its input.
+type sizeCounter struct{ size int64 }
+
+func (c *sizeCounter) Write(p []byte) (n int, err error) {
+	c.size += int64(len(p))
+	return len(p), nil
+}
+
 // PutBlob writes contents of stream and returns data representing the result (with all data filled in).
 // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
 // inputInfo.Size is the expected length of stream, if known.
@@ -107,7 +120,8 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
 	}
 
 	h := sha256.New()
-	tee := io.TeeReader(stream, h)
+	sizeCounter := &sizeCounter{}
+	tee := io.TeeReader(stream, io.MultiWriter(h, sizeCounter))
 	res, err = d.c.makeRequestToResolvedURL("PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size)
 	if err != nil {
 		logrus.Debugf("Error uploading layer chunked, response %#v", *res)
@@ -139,7 +153,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
 	}
 
 	logrus.Debugf("Upload of layer %s complete", computedDigest)
-	return types.BlobInfo{Digest: computedDigest, Size: res.Request.ContentLength}, nil
+	return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
 }
 
 func (d *dockerImageDestination) PutManifest(m []byte) error {
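For context on the sizeCounter change above: the digest and the uploaded size are now computed in a single pass by teeing the upload stream into an io.MultiWriter, instead of trusting res.Request.ContentLength. A self-contained sketch of that pattern with a hypothetical byteCounter type, not the vendored code:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// byteCounter is an io.Writer that only records how many bytes pass through it.
type byteCounter struct{ n int64 }

func (c *byteCounter) Write(p []byte) (int, error) {
	c.n += int64(len(p))
	return len(p), nil
}

func main() {
	stream := strings.NewReader("pretend this is a layer blob")

	h := sha256.New()
	counter := &byteCounter{}
	// Everything the consumer reads is also fed to the hash and the counter.
	tee := io.TeeReader(stream, io.MultiWriter(h, counter))

	// Stand-in for the registry upload: just drain the reader.
	if _, err := io.Copy(ioutil.Discard, tee); err != nil {
		panic(err)
	}

	fmt.Printf("sha256:%s, %d bytes uploaded\n", hex.EncodeToString(h.Sum(nil)), counter.n)
}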
vendor/github.com/containers/image/image/docker_schema1.go (generated, vendored): 37 changed lines
@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"regexp"
 
+	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
 )
 
@@ -18,14 +19,14 @@ type fsLayersSchema1 struct {
 }
 
 type manifestSchema1 struct {
-	Name     string
-	Tag      string
-	FSLayers []fsLayersSchema1 `json:"fsLayers"`
-	History  []struct {
+	Name         string            `json:"name"`
+	Tag          string            `json:"tag"`
+	Architecture string            `json:"architecture"`
+	FSLayers     []fsLayersSchema1 `json:"fsLayers"`
+	History      []struct {
 		V1Compatibility string `json:"v1Compatibility"`
 	} `json:"history"`
 	// TODO(runcom) verify the downloaded manifest
 	//Signature []byte `json:"signature"`
 	SchemaVersion int `json:"schemaVersion"`
 }
 
 func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) {
@@ -52,7 +53,7 @@ func (m *manifestSchema1) ConfigInfo() types.BlobInfo {
 
 func (m *manifestSchema1) LayerInfos() []types.BlobInfo {
 	layers := make([]types.BlobInfo, len(m.FSLayers))
-	for i, layer := range m.FSLayers {
+	for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
 		layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1}
 	}
 	return layers
@@ -81,6 +82,28 @@ func (m *manifestSchema1) ImageInspectInfo() (*types.ImageInspectInfo, error) {
 	}, nil
 }
 
+func (m *manifestSchema1) UpdatedManifest(options types.ManifestUpdateOptions) ([]byte, error) {
+	copy := *m
+	if options.LayerInfos != nil {
+		// Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
+		if len(copy.FSLayers) != len(options.LayerInfos) {
+			return nil, fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos))
+		}
+		for i, info := range options.LayerInfos {
+			// (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
+			// but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
+			// So, we don't bother recomputing the IDs in m.History.V1Compatibility.
+			copy.FSLayers[(len(options.LayerInfos)-1)-i].BlobSum = info.Digest
+		}
+	}
+	// docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType.
+	unsigned, err := json.Marshal(copy)
+	if err != nil {
+		return nil, err
+	}
+	return manifest.AddDummyV2S1Signature(unsigned)
+}
+
 // fixManifestLayers, after validating the supplied manifest
 // (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History),
 // modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates,
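The (len-1)-i index in both LayerInfos and UpdatedManifest above exists because schema1 fsLayers are listed most-recent-first, while BlobInfo slices (per ManifestUpdateOptions in types.go) are root-first. A tiny sketch with hypothetical miniature types showing that the two reversals round-trip:

package main

import "fmt"

// Hypothetical miniature versions of the real types, just to show the index math.
type fsLayer struct{ BlobSum string }
type blobInfo struct{ Digest string }

// layerInfos mirrors manifestSchema1.LayerInfos: fsLayers are most-recent-first,
// the returned slice is root-first, hence the (len-1)-i reversal.
func layerInfos(fsLayers []fsLayer) []blobInfo {
	layers := make([]blobInfo, len(fsLayers))
	for i, layer := range fsLayers {
		layers[(len(fsLayers)-1)-i] = blobInfo{Digest: layer.BlobSum}
	}
	return layers
}

// applyUpdate mirrors UpdatedManifest: updates arrive root-first and are written back reversed.
func applyUpdate(fsLayers []fsLayer, updates []blobInfo) {
	for i, info := range updates {
		fsLayers[(len(updates)-1)-i].BlobSum = info.Digest
	}
}

func main() {
	m := []fsLayer{{"sha256:top"}, {"sha256:middle"}, {"sha256:base"}}
	fmt.Println(layerInfos(m)) // [{sha256:base} {sha256:middle} {sha256:top}]

	applyUpdate(m, []blobInfo{{"sha256:base'"}, {"sha256:middle'"}, {"sha256:top'"}})
	fmt.Println(m) // [{sha256:top'} {sha256:middle'} {sha256:base'}]
}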
vendor/github.com/containers/image/image/docker_schema2.go (generated, vendored): 17 changed lines
@@ -2,6 +2,7 @@ package image
 
 import (
 	"encoding/json"
+	"fmt"
 	"io/ioutil"
 
 	"github.com/containers/image/types"
@@ -15,6 +16,8 @@ type descriptor struct {
 
 type manifestSchema2 struct {
 	src               types.ImageSource
+	SchemaVersion     int          `json:"schemaVersion"`
+	MediaType         string       `json:"mediaType"`
 	ConfigDescriptor  descriptor   `json:"config"`
 	LayersDescriptors []descriptor `json:"layers"`
 }
@@ -66,3 +69,17 @@ func (m *manifestSchema2) ImageInspectInfo() (*types.ImageInspectInfo, error) {
 		Os:            v1.OS,
 	}, nil
 }
+
+func (m *manifestSchema2) UpdatedManifest(options types.ManifestUpdateOptions) ([]byte, error) {
+	copy := *m
+	if options.LayerInfos != nil {
+		if len(copy.LayersDescriptors) != len(options.LayerInfos) {
+			return nil, fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
+		}
+		for i, info := range options.LayerInfos {
+			copy.LayersDescriptors[i].Digest = info.Digest
+			copy.LayersDescriptors[i].Size = info.Size
+		}
+	}
+	return json.Marshal(copy)
+}
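A compact sketch of what manifestSchema2.UpdatedManifest amounts to, using hypothetical miniature types rather than the vendored ones: copy the parsed manifest, replace each layer descriptor's digest and size, and re-marshal; the json tags keep schemaVersion, mediaType, config and layers spelled the way a registry expects.

package main

import (
	"encoding/json"
	"fmt"
)

type desc struct {
	MediaType string `json:"mediaType"`
	Size      int64  `json:"size"`
	Digest    string `json:"digest"`
}

type schema2 struct {
	SchemaVersion int    `json:"schemaVersion"`
	MediaType     string `json:"mediaType"`
	Config        desc   `json:"config"`
	Layers        []desc `json:"layers"`
}

func main() {
	m := schema2{
		SchemaVersion: 2,
		MediaType:     "application/vnd.docker.distribution.manifest.v2+json",
		Config:        desc{MediaType: "application/vnd.docker.container.image.v1+json", Size: 10, Digest: "sha256:cfg"},
		Layers:        []desc{{MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Size: 100, Digest: "sha256:old"}},
	}

	// The equivalent of UpdatedManifest: work on a copy, replace digest+size per layer, re-marshal.
	updated := m
	updated.Layers = append([]desc(nil), m.Layers...)
	updated.Layers[0].Digest = "sha256:new"
	updated.Layers[0].Size = 123

	out, err := json.Marshal(updated)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}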
vendor/github.com/containers/image/image/image.go (generated, vendored): 11 changed lines
@@ -108,6 +108,7 @@ type genericManifest interface {
 	ConfigInfo() types.BlobInfo
 	LayerInfos() []types.BlobInfo
 	ImageInspectInfo() (*types.ImageInspectInfo, error) // The caller will need to fill in Layers
+	UpdatedManifest(types.ManifestUpdateOptions) ([]byte, error)
 }
 
 // getParsedManifest parses the manifest into a data structure, cleans it up, and returns it.
@@ -173,3 +174,13 @@ func (i *genericImage) LayerInfos() ([]types.BlobInfo, error) {
 	}
 	return m.LayerInfos(), nil
 }
+
+// UpdatedManifest returns the image's manifest modified according to updateOptions.
+// This does not change the state of the Image object.
+func (i *genericImage) UpdatedManifest(options types.ManifestUpdateOptions) ([]byte, error) {
+	m, err := i.getParsedManifest()
+	if err != nil {
+		return nil, err
+	}
+	return m.UpdatedManifest(options)
+}
vendor/github.com/containers/image/manifest/manifest.go (generated, vendored): 18 changed lines
@@ -95,3 +95,21 @@ func MatchesDigest(manifest []byte, expectedDigest string) (bool, error) {
 	}
 	return expectedDigest == actualDigest, nil
 }
+
+// AddDummyV2S1Signature adds an JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
+// This is useful to make the manifest acceptable to a Docker Registry (even though nothing needs or wants the JWS signature).
+func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
+	key, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		return nil, err // Coverage: This can fail only if rand.Reader fails.
+	}
+
+	js, err := libtrust.NewJSONSignature(manifest)
+	if err != nil {
+		return nil, err
+	}
+	if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails.
+		return nil, err
+	}
+	return js.PrettySignature("signatures")
+}
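A hedged usage sketch of the new helper; the wrapper name addDummySignature and the sample manifest are made up, while the libtrust calls are exactly the ones used above. The idea is to marshal the updated schema1 manifest first, then attach a throwaway JWS so docker/distribution will accept the upload.

package main

import (
	"fmt"

	"github.com/docker/libtrust"
)

// addDummySignature mirrors manifest.AddDummyV2S1Signature: the key is generated,
// used once and thrown away; registries only check that some well-formed JWS is present.
func addDummySignature(unsigned []byte) ([]byte, error) {
	key, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		return nil, err
	}
	js, err := libtrust.NewJSONSignature(unsigned)
	if err != nil {
		return nil, err
	}
	if err := js.Sign(key); err != nil {
		return nil, err
	}
	return js.PrettySignature("signatures")
}

func main() {
	unsigned := []byte(`{"schemaVersion": 1, "name": "example/repo", "tag": "latest", "fsLayers": [], "history": []}`)
	signed, err := addDummySignature(unsigned)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(signed))
}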
vendor/github.com/containers/image/oci/layout/oci_dest.go (generated, vendored): 5 changed lines
@@ -49,6 +49,11 @@ func (d *ociImageDestination) SupportsSignatures() error {
 	return fmt.Errorf("Pushing signatures for OCI images is not supported")
 }
 
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *ociImageDestination) ShouldCompressLayers() bool {
+	return false
+}
+
 // PutBlob writes contents of stream and returns data representing the result (with all data filled in).
 // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
 // inputInfo.Size is the expected length of stream, if known.
vendor/github.com/containers/image/openshift/openshift.go (generated, vendored): 5 changed lines
@@ -337,6 +337,11 @@ func (d *openshiftImageDestination) SupportsSignatures() error {
 	return nil
 }
 
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *openshiftImageDestination) ShouldCompressLayers() bool {
+	return true
+}
+
 // PutBlob writes contents of stream and returns data representing the result (with all data filled in).
 // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
 // inputInfo.Size is the expected length of stream, if known.
vendor/github.com/containers/image/types/types.go (generated, vendored): 10 changed lines
@@ -133,6 +133,8 @@ type ImageDestination interface {
 	// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
 	// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
 	SupportsSignatures() error
+	// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+	ShouldCompressLayers() bool
 
 	// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
 	// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
@@ -175,6 +177,14 @@ type Image interface {
 	LayerInfos() ([]BlobInfo, error)
 	// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
 	Inspect() (*ImageInspectInfo, error)
+	// UpdatedManifest returns the image's manifest modified according to options.
+	// This does not change the state of the Image object.
+	UpdatedManifest(options ManifestUpdateOptions) ([]byte, error)
 }
 
+// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest
+type ManifestUpdateOptions struct {
+	LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which should replace the originals, in order (the root layer first, and then successive layered layers)
+}
+
 // ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration.
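To tie the pieces together, here is a minimal sketch of the caller-side flow these interface changes enable, written with stand-in types rather than the real containers/image API: copy each layer, remember the BlobInfo the destination actually stored, and request an updated manifest only when digests changed and modification is allowed.

package main

import "fmt"

// Stand-ins for the vendored types, just to show the control flow.
type blobInfo struct {
	Digest string
	Size   int64
}

type manifestUpdateOptions struct {
	LayerInfos []blobInfo // root layer first
}

func layerDigestsDiffer(a, b []blobInfo) bool {
	if len(a) != len(b) {
		return true
	}
	for i := range a {
		if a[i].Digest != b[i].Digest {
			return true
		}
	}
	return false
}

func main() {
	srcLayers := []blobInfo{{Digest: "sha256:aaa", Size: -1}, {Digest: "sha256:bbb", Size: -1}}
	canModifyManifest := true // e.g. no signatures attached

	// Pretend each upload compressed the blob and reported a new digest and size.
	destLayers := make([]blobInfo, 0, len(srcLayers))
	for i, l := range srcLayers {
		destLayers = append(destLayers, blobInfo{Digest: fmt.Sprintf("%s-gz", l.Digest), Size: int64(100 * (i + 1))})
	}

	updates := manifestUpdateOptions{}
	if layerDigestsDiffer(srcLayers, destLayers) {
		updates.LayerInfos = destLayers
	}
	if updates.LayerInfos != nil && !canModifyManifest {
		panic("copy needs an updated manifest but that was known to be forbidden")
	}
	fmt.Printf("manifest update needed: %v, new layers: %v\n", updates.LayerInfos != nil, updates.LayerInfos)
}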