
Merge pull request from mtrmac/compress

Layer compression
Miloslav Trmač 2016-09-19 22:06:14 +02:00 committed by GitHub
commit 98bfef9072
16 changed files with 358 additions and 32 deletions

View File

@@ -15,9 +15,12 @@ func init() {
 	check.Suite(&CopySuite{})
 }
 
+const v2DockerRegistryURL = "localhost:5555"
+
 type CopySuite struct {
-	cluster *openshiftCluster
-	gpgHome string
+	cluster  *openshiftCluster
+	registry *testRegistryV2
+	gpgHome  string
 }
 
 func (s *CopySuite) SetUpSuite(c *check.C) {
@@ -27,7 +30,7 @@ func (s *CopySuite) SetUpSuite(c *check.C) {
 	s.cluster = startOpenshiftCluster(c) // FIXME: Set up TLS for the docker registry port instead of using "--tls-verify=false" all over the place.
 
-	for _, stream := range []string{"unsigned", "personal", "official", "naming", "cosigned"} {
+	for _, stream := range []string{"unsigned", "personal", "official", "naming", "cosigned", "compression"} {
 		isJSON := fmt.Sprintf(`{
 			"kind": "ImageStream",
 			"apiVersion": "v1",
@@ -39,6 +42,8 @@ func (s *CopySuite) SetUpSuite(c *check.C) {
 		runCommandWithInput(c, isJSON, "oc", "create", "-f", "-")
 	}
 
+	s.registry = setupRegistryV2At(c, v2DockerRegistryURL, false, false) // FIXME: Set up TLS for the docker registry port instead of using "--tls-verify=false" all over the place.
+
 	gpgHome, err := ioutil.TempDir("", "skopeo-gpg")
 	c.Assert(err, check.IsNil)
 	s.gpgHome = gpgHome
@@ -60,6 +65,9 @@ func (s *CopySuite) TearDownSuite(c *check.C) {
 	if s.gpgHome != "" {
 		os.RemoveAll(s.gpgHome)
 	}
+	if s.registry != nil {
+		s.registry.Close()
+	}
 	if s.cluster != nil {
 		s.cluster.tearDown()
 	}
@@ -242,3 +250,44 @@ func (s *CopySuite) TestCopyDirSignatures(c *check.C) {
 	assertSkopeoFails(c, ".*Source image rejected: .*Signature for identity localhost:5000/myns/personal:dirstaging2 is not accepted.*",
 		"--policy", policy, "copy", topDirDest+"/restricted/badidentity", topDirDest+"/dest")
 }
+
+// Compression during copy
+func (s *CopySuite) TestCopyCompression(c *check.C) {
+	const uncompressedLayerFile = "160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710.tar"
+
+	topDir, err := ioutil.TempDir("", "compression-top")
+	c.Assert(err, check.IsNil)
+	defer os.RemoveAll(topDir)
+
+	for i, t := range []struct{ fixture, remote string }{
+		//{"uncompressed-image-s1", "docker://" + v2DockerRegistryURL + "/compression/compression:s1"}, // FIXME: depends on push to tag working
+		//{"uncompressed-image-s2", "docker://" + v2DockerRegistryURL + "/compression/compression:s2"}, // FIXME: depends on push to tag working
+		{"uncompressed-image-s1", "atomic:localhost:5000/myns/compression:s1"},
+		//{"uncompressed-image-s2", "atomic:localhost:5000/myns/compression:s2"}, // FIXME: The unresolved "MANIFEST_UNKNOWN"/"unexpected end of JSON input" failure
+	} {
+		dir := filepath.Join(topDir, fmt.Sprintf("case%d", i))
+		err := os.MkdirAll(dir, 0755)
+		c.Assert(err, check.IsNil)
+
+		assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", "dir:fixtures/"+t.fixture, t.remote)
+		assertSkopeoSucceeds(c, "", "--tls-verify=false", "copy", t.remote, "dir:"+dir)
+
+		// The original directory contained an uncompressed file; the copy after pushing and pulling doesn't (we use a different name for the compressed file).
+		_, err = os.Lstat(filepath.Join("fixtures", t.fixture, uncompressedLayerFile))
+		c.Assert(err, check.IsNil)
+		_, err = os.Lstat(filepath.Join(dir, uncompressedLayerFile))
+		c.Assert(err, check.NotNil)
+		c.Assert(os.IsNotExist(err), check.Equals, true)
+
+		// All pulled layers are smaller than the uncompressed size of uncompressedLayerFile. (Note that this includes the manifest in s2, but that works out OK).
+		dirf, err := os.Open(dir)
+		c.Assert(err, check.IsNil)
+		fis, err := dirf.Readdir(-1)
+		c.Assert(err, check.IsNil)
+		for _, fi := range fis {
+			if strings.HasSuffix(fi.Name(), ".tar") {
+				c.Assert(fi.Size() < 2048, check.Equals, true)
+			}
+		}
+	}
+}
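
Aside (not part of the diff): the test proves compression happened via a size bound only — every pulled .tar blob must be under 2048 bytes, the size of the uncompressed fixture layer. A more direct check would sniff the gzip magic bytes, the same RFC 1952 prefix the new isStreamCompressed helper later in this diff looks for. A minimal sketch; the helper name isGzipFile and the path layer.tar are hypothetical:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

// gzipMagic is the RFC 1952 member header: ID1=0x1F, ID2=0x8B, CM=0x08 (deflate).
var gzipMagic = []byte{0x1F, 0x8B, 0x08}

// isGzipFile reports whether the file at path starts with the gzip magic bytes.
func isGzipFile(path string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()
	header := make([]byte, len(gzipMagic))
	if _, err := io.ReadFull(f, header); err != nil {
		return false, err // A file shorter than the header can't be a gzip member anyway.
	}
	return bytes.Equal(header, gzipMagic), nil
}

func main() {
	compressed, err := isGzipFile("layer.tar")
	if err != nil {
		panic(err)
	}
	fmt.Println("gzip-compressed:", compressed)
}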

View File

@@ -0,0 +1,32 @@
{
"schemaVersion": 1,
"name": "nonempty",
"tag": "nonempty",
"architecture": "amd64",
"fsLayers": [
{
"blobSum": "sha256:160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710"
}
],
"history": [
{
"v1Compatibility": "{\"architecture\":\"amd64\",\"config\":{\"Hostname\":\"59c20544b2f4\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"container\":\"59c20544b2f4ad7a8639433bacb1ec215b7dad4a7bf1a83b5ab4679329a46c1d\",\"container_config\":{\"Hostname\":\"59c20544b2f4\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ADD file:14f49faade3db5e596826746d9ed3dfd658490c16c4d61d4886726153ad0591a in /\"],\"Image\":\"\",\"Volumes\":null,\"WorkingDir\":\"\",\"Entrypoint\":null,\"OnBuild\":null,\"Labels\":null},\"created\":\"2016-09-19T18:23:54.9949213Z\",\"docker_version\":\"1.10.3\",\"id\":\"4c224eac5061bb85f523ca4e3316618fd7921a80fe94286979667b1edb8e1bdd\",\"os\":\"linux\"}"
}
],
"signatures": [
{
"header": {
"jwk": {
"crv": "P-256",
"kid": "DGWZ:GAUM:WCOC:IMDL:D67M:CEI6:YTVH:M2CM:5HX4:FYDD:77OD:D3F7",
"kty": "EC",
"x": "eprZNqLO9mHZ4Z4GxefucEgov_1gwEi9lehpJR2suRo",
"y": "wIr2ucNg32ROfVCkR_8A5VbBJ-mFmsoIUVa6vt8lIxM"
},
"alg": "ES256"
},
"signature": "bvTLWW4YVFRjAanN1EJqwQw60fWSWJPxcGO3UZGFI_gyV6ucGdW4x7jyYL6g06sg925s9cy0wN1lw91CCFv4BA",
"protected": "eyJmb3JtYXRMZW5ndGgiOjE0ODcsImZvcm1hdFRhaWwiOiJDbjBLIiwidGltZSI6IjIwMTYtMDktMTlUMTg6NDM6MzNaIn0"
}
]
}

View File

@@ -0,0 +1 @@
{"architecture":"amd64","config":{"Hostname":"59c20544b2f4","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"59c20544b2f4ad7a8639433bacb1ec215b7dad4a7bf1a83b5ab4679329a46c1d","container_config":{"Hostname":"59c20544b2f4","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","#(nop) ADD file:14f49faade3db5e596826746d9ed3dfd658490c16c4d61d4886726153ad0591a in /"],"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2016-09-19T18:23:54.9949213Z","docker_version":"1.10.3","history":[{"created":"2016-09-19T18:23:54.9949213Z","created_by":"/bin/sh -c #(nop) ADD file:14f49faade3db5e596826746d9ed3dfd658490c16c4d61d4886726153ad0591a in /"}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710"]}}

View File

@@ -0,0 +1,16 @@
{
"schemaVersion": 2,
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
"config": {
"mediaType": "application/octet-stream",
"size": 1272,
"digest": "sha256:86ce150e65c72b30f885c261449d18b7c6832596916e7f654e08377b5a67b4ff"
},
"layers": [
{
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
"size": 2048,
"digest": "sha256:160d823fdc48e62f97ba62df31e55424f8f5eb6b679c865eec6e59adfe304710"
}
]
}

View File

@@ -1,14 +1,19 @@
 package copy
 
 import (
+	"bytes"
+	"compress/gzip"
 	"crypto/sha256"
 	"crypto/subtle"
 	"encoding/hex"
+	"errors"
 	"fmt"
 	"hash"
 	"io"
+	"reflect"
 	"strings"
 
+	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/image"
 	"github.com/containers/image/signature"
 	"github.com/containers/image/transports"
@@ -120,27 +125,52 @@ func Image(ctx *types.SystemContext, policyContext *signature.PolicyContext, des
 			return fmt.Errorf("Can not copy signatures: %v", err)
 		}
 	}
+	canModifyManifest := len(sigs) == 0
 
-	configInfo, err := src.ConfigInfo()
+	srcConfigInfo, err := src.ConfigInfo()
 	if err != nil {
 		return fmt.Errorf("Error parsing manifest: %v", err)
 	}
-	if configInfo.Digest != "" {
-		if err := copyBlob(dest, rawSource, configInfo.Digest); err != nil {
+	if srcConfigInfo.Digest != "" {
+		destConfigInfo, err := copyBlob(dest, rawSource, srcConfigInfo, false)
+		if err != nil {
 			return err
 		}
+		if destConfigInfo.Digest != srcConfigInfo.Digest {
+			return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcConfigInfo.Digest, destConfigInfo.Digest)
+		}
 	}
 
-	layerInfos, err := src.LayerInfos()
+	srcLayerInfos, err := src.LayerInfos()
 	if err != nil {
 		return fmt.Errorf("Error parsing manifest: %v", err)
 	}
-	copiedLayers := map[string]struct{}{}
-	for _, info := range layerInfos {
-		if _, ok := copiedLayers[info.Digest]; !ok {
-			if err := copyBlob(dest, rawSource, info.Digest); err != nil {
+	destLayerInfos := []types.BlobInfo{}
+	copiedLayers := map[string]types.BlobInfo{}
+	for _, srcLayer := range srcLayerInfos {
+		destLayer, ok := copiedLayers[srcLayer.Digest]
+		if !ok {
+			destLayer, err = copyBlob(dest, rawSource, srcLayer, canModifyManifest)
+			if err != nil {
 				return err
 			}
-			copiedLayers[info.Digest] = struct{}{}
+			copiedLayers[srcLayer.Digest] = destLayer
 		}
+		destLayerInfos = append(destLayerInfos, destLayer)
 	}
+
+	manifestUpdates := types.ManifestUpdateOptions{}
+	if layerDigestsDiffer(srcLayerInfos, destLayerInfos) {
+		manifestUpdates.LayerInfos = destLayerInfos
+	}
+	if !reflect.DeepEqual(manifestUpdates, types.ManifestUpdateOptions{}) {
+		if !canModifyManifest {
+			return fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden")
+		}
+		manifest, err = src.UpdatedManifest(manifestUpdates)
+		if err != nil {
+			return fmt.Errorf("Error creating an updated manifest: %v", err)
+		}
+	}
@@ -176,27 +206,117 @@ func Image(ctx *types.SystemContext, policyContext *signature.PolicyContext, des
 	return nil
 }
 
-func copyBlob(dest types.ImageDestination, src types.ImageSource, digest string) error {
-	stream, blobSize, err := src.GetBlob(digest)
-	if err != nil {
-		return fmt.Errorf("Error reading blob %s: %v", digest, err)
+// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possibly other fields)
+func layerDigestsDiffer(a, b []types.BlobInfo) bool {
+	if len(a) != len(b) {
+		return true
 	}
-	defer stream.Close()
+	for i := range a {
+		if a[i].Digest != b[i].Digest {
+			return true
+		}
+	}
+	return false
+}
+
+// copyBlob copies a blob with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
+// and returns a complete blobInfo of the copied blob.
+func copyBlob(dest types.ImageDestination, src types.ImageSource, srcInfo types.BlobInfo, canCompress bool) (types.BlobInfo, error) {
+	srcStream, srcBlobSize, err := src.GetBlob(srcInfo.Digest) // We currently completely ignore srcInfo.Size throughout.
+	if err != nil {
+		return types.BlobInfo{}, fmt.Errorf("Error reading blob %s: %v", srcInfo.Digest, err)
+	}
+	defer srcStream.Close()
 
 	// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
 	// use a separate validation failure indicator.
 	// Note that we don't use a stronger "validationSucceeded" indicator, because
 	// dest.PutBlob may detect that the layer already exists, in which case we don't
 	// read stream to the end, and validation does not happen.
-	digestingReader, err := newDigestingReader(stream, digest)
+	digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
 	if err != nil {
-		return fmt.Errorf("Error preparing to verify blob %s: %v", digest, err)
+		return types.BlobInfo{}, fmt.Errorf("Error preparing to verify blob %s: %v", srcInfo.Digest, err)
 	}
-	if _, err := dest.PutBlob(digestingReader, types.BlobInfo{Digest: digest, Size: blobSize}); err != nil {
-		return fmt.Errorf("Error writing blob: %v", err)
+
+	var destStream io.Reader = digestingReader
+	isCompressed, destStream, err := isStreamCompressed(destStream) // We could skip this in some cases, but let's keep the code path uniform
+	if err != nil {
+		return types.BlobInfo{}, fmt.Errorf("Error reading blob %s: %v", srcInfo.Digest, err)
+	}
+
+	var inputInfo types.BlobInfo
+	if !canCompress || isCompressed || !dest.ShouldCompressLayers() {
+		logrus.Debugf("Using original blob without modification")
+		inputInfo.Digest = srcInfo.Digest
+		inputInfo.Size = srcBlobSize
+	} else {
+		logrus.Debugf("Compressing blob on the fly")
+		pipeReader, pipeWriter := io.Pipe()
+		defer pipeReader.Close()
+
+		// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
+		// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
+		// we don't care.
+		go compressGoroutine(pipeWriter, destStream) // Closes pipeWriter
+		destStream = pipeReader
+		inputInfo.Digest = ""
+		inputInfo.Size = -1
+	}
+
+	uploadedInfo, err := dest.PutBlob(destStream, inputInfo)
+	if err != nil {
+		return types.BlobInfo{}, fmt.Errorf("Error writing blob: %v", err)
 	}
 	if digestingReader.validationFailed { // Coverage: This should never happen.
-		return fmt.Errorf("Internal error uploading blob %s, digest verification failed but was ignored", digest)
+		return types.BlobInfo{}, fmt.Errorf("Internal error uploading blob %s, digest verification failed but was ignored", srcInfo.Digest)
 	}
-	return nil
+	if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
+		return types.BlobInfo{}, fmt.Errorf("Internal error uploading blob %s, blob with digest %s uploaded with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
+	}
+	return uploadedInfo, nil
 }
+
+// compressionPrefixes is an internal implementation detail of isStreamCompressed
+var compressionPrefixes = map[string][]byte{
+	"gzip":  {0x1F, 0x8B, 0x08},                   // gzip (RFC 1952)
+	"bzip2": {0x42, 0x5A, 0x68},                   // bzip2 (decompress.c:BZ2_decompress)
+	"xz":    {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, // xz (/usr/share/doc/xz/xz-file-format.txt)
+}
+
+// isStreamCompressed returns true if input is recognized as a compressed format.
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
+func isStreamCompressed(input io.Reader) (bool, io.Reader, error) {
+	buffer := [8]byte{}
+
+	n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
+	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+		// This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again.
+		// Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
+		return false, nil, err
+	}
+
+	isCompressed := false
+	for algo, prefix := range compressionPrefixes {
+		if bytes.HasPrefix(buffer[:n], prefix) {
+			logrus.Debugf("Detected compression format %s", algo)
+			isCompressed = true
+			break
+		}
+	}
+	if !isCompressed {
+		logrus.Debugf("No compression detected")
+	}
+
+	return isCompressed, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
+}
+
+// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
+func compressGoroutine(dest *io.PipeWriter, src io.Reader) {
+	err := errors.New("Internal error: unexpected panic in compressGoroutine")
+	defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
+		dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
+	}()
+
+	zipper := gzip.NewWriter(dest)
+	defer zipper.Close()
+
+	_, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close()
+}
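
Aside (not part of the diff): the compression path above never buffers a whole layer. copyBlob splices a gzip writer into the stream with io.Pipe, and compressGoroutine reports failures through CloseWithError so the reading side (PutBlob) sees them as read errors. A standalone sketch of that pattern, under the assumption that the consumer only needs an io.Reader of unknown length (the function name compress and the literal input are illustrative):

package main

import (
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// compress mirrors compressGoroutine: drain src into a gzip stream wrapping dest,
// then close the pipe, propagating any io.Copy error to the reader.
func compress(dest *io.PipeWriter, src io.Reader) {
	var err error
	defer func() { dest.CloseWithError(err) }() // CloseWithError(nil) is equivalent to Close()
	zipper := gzip.NewWriter(dest)
	defer zipper.Close() // Runs before the pipe close, flushing the gzip footer.
	_, err = io.Copy(zipper, src)
}

func main() {
	pipeReader, pipeWriter := io.Pipe()
	go compress(pipeWriter, strings.NewReader("some uncompressed layer data"))
	// The consumer (dest.PutBlob in the real code) sees only an io.Reader;
	// Digest "" and Size -1 tell it to compute both during the upload.
	compressed, err := ioutil.ReadAll(pipeReader)
	if err != nil {
		panic(err)
	}
	fmt.Printf("compressed to %d bytes\n", len(compressed))
}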

View File

@@ -40,6 +40,11 @@ func (d *dirImageDestination) SupportsSignatures() error {
 	return nil
 }
 
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *dirImageDestination) ShouldCompressLayers() bool {
+	return false
+}
+
 // PutBlob writes contents of stream and returns data representing the result (with all data filled in).
 // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
 // inputInfo.Size is the expected length of stream, if known.

View File

@@ -62,6 +62,19 @@ func (d *dockerImageDestination) SupportsSignatures() error {
 	return fmt.Errorf("Pushing signatures to a Docker Registry is not supported")
 }
 
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *dockerImageDestination) ShouldCompressLayers() bool {
+	return true
+}
+
+// sizeCounter is an io.Writer which only counts the total size of its input.
+type sizeCounter struct{ size int64 }
+
+func (c *sizeCounter) Write(p []byte) (n int, err error) {
+	c.size += int64(len(p))
+	return len(p), nil
+}
+
 // PutBlob writes contents of stream and returns data representing the result (with all data filled in).
 // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
 // inputInfo.Size is the expected length of stream, if known.
@@ -107,7 +120,8 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
 	}
 
 	h := sha256.New()
-	tee := io.TeeReader(stream, h)
+	sizeCounter := &sizeCounter{}
+	tee := io.TeeReader(stream, io.MultiWriter(h, sizeCounter))
 	res, err = d.c.makeRequestToResolvedURL("PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size)
 	if err != nil {
 		logrus.Debugf("Error uploading layer chunked, response %#v", *res)
@@ -139,7 +153,7 @@ func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobI
 	}
 
 	logrus.Debugf("Upload of layer %s complete", computedDigest)
-	return types.BlobInfo{Digest: computedDigest, Size: res.Request.ContentLength}, nil
+	return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
 }
 
 func (d *dockerImageDestination) PutManifest(m []byte) error {
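
Aside (not part of the diff): once a blob may be compressed in transit, inputInfo.Size can be -1 and res.Request.ContentLength no longer reflects the bytes actually sent, so the sizeCounter teed in next to the sha256 hash is what produces the returned BlobInfo.Size. A runnable sketch of that one-pass hash-and-count pattern (the input string stands in for the layer stream):

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// sizeCounter, as in the diff: an io.Writer that only counts what passes through.
type sizeCounter struct{ size int64 }

func (c *sizeCounter) Write(p []byte) (int, error) {
	c.size += int64(len(p))
	return len(p), nil
}

func main() {
	stream := strings.NewReader("layer bytes of unknown length")
	h := sha256.New()
	counter := &sizeCounter{}
	// One pass over the stream feeds both the digest and the byte count.
	tee := io.TeeReader(stream, io.MultiWriter(h, counter))
	if _, err := io.Copy(ioutil.Discard, tee); err != nil { // stands in for the PATCH upload
		panic(err)
	}
	fmt.Printf("sha256:%x, %d bytes\n", h.Sum(nil), counter.size)
}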

View File

@@ -6,6 +6,7 @@ import (
 	"fmt"
 	"regexp"
 
+	"github.com/containers/image/manifest"
 	"github.com/containers/image/types"
 )
@@ -18,14 +19,14 @@
 }
 
 type manifestSchema1 struct {
-	Name     string
-	Tag      string
-	FSLayers []fsLayersSchema1 `json:"fsLayers"`
-	History  []struct {
+	Name         string            `json:"name"`
+	Tag          string            `json:"tag"`
+	Architecture string            `json:"architecture"`
+	FSLayers     []fsLayersSchema1 `json:"fsLayers"`
+	History      []struct {
 		V1Compatibility string `json:"v1Compatibility"`
 	} `json:"history"`
 	// TODO(runcom) verify the downloaded manifest
 	//Signature []byte `json:"signature"`
 	SchemaVersion int `json:"schemaVersion"`
 }
func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) {
@@ -52,7 +53,7 @@ func (m *manifestSchema1) ConfigInfo() types.BlobInfo {
 func (m *manifestSchema1) LayerInfos() []types.BlobInfo {
 	layers := make([]types.BlobInfo, len(m.FSLayers))
-	for i, layer := range m.FSLayers {
+	for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
 		layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1}
 	}
 	return layers
@ -81,6 +82,28 @@ func (m *manifestSchema1) ImageInspectInfo() (*types.ImageInspectInfo, error) {
}, nil
}
func (m *manifestSchema1) UpdatedManifest(options types.ManifestUpdateOptions) ([]byte, error) {
copy := *m
if options.LayerInfos != nil {
// Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
if len(copy.FSLayers) != len(options.LayerInfos) {
return nil, fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos))
}
for i, info := range options.LayerInfos {
// (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
// but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
// So, we don't bother recomputing the IDs in m.History.V1Compatibility.
copy.FSLayers[(len(options.LayerInfos)-1)-i].BlobSum = info.Digest
}
}
// docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType.
unsigned, err := json.Marshal(copy)
if err != nil {
return nil, err
}
return manifest.AddDummyV2S1Signature(unsigned)
}
// fixManifestLayers, after validating the supplied manifest
// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History),
// modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates,
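
Aside (not part of the diff): the mirrored index (len-1)-i appears in both LayerInfos and UpdatedManifest because a schema 1 manifest lists fsLayers newest-first, while the BlobInfo slices in this API are root-first. A toy illustration:

package main

import "fmt"

func main() {
	fsLayers := []string{"newest", "middle", "root"} // schema 1 manifest order
	n := len(fsLayers)

	// Reading (LayerInfos): reverse into root-first order.
	layerInfos := make([]string, n)
	for i, l := range fsLayers {
		layerInfos[(n-1)-i] = l
	}
	fmt.Println(layerInfos) // [root middle newest]

	// Writing back (UpdatedManifest) applies the same mirror, so a root-first
	// update lands on the matching fsLayers entry.
	for i, l := range layerInfos {
		fsLayers[(n-1)-i] = l
	}
	fmt.Println(fsLayers) // [newest middle root]
}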

View File

@@ -2,6 +2,7 @@ package image
 import (
 	"encoding/json"
+	"fmt"
 	"io/ioutil"
 
 	"github.com/containers/image/types"
@@ -15,6 +16,8 @@
 type manifestSchema2 struct {
 	src               types.ImageSource
 	SchemaVersion     int          `json:"schemaVersion"`
+	MediaType         string       `json:"mediaType"`
 	ConfigDescriptor  descriptor   `json:"config"`
 	LayersDescriptors []descriptor `json:"layers"`
 }
@@ -66,3 +69,17 @@ func (m *manifestSchema2) ImageInspectInfo() (*types.ImageInspectInfo, error) {
 		Os: v1.OS,
 	}, nil
 }
+
+func (m *manifestSchema2) UpdatedManifest(options types.ManifestUpdateOptions) ([]byte, error) {
+	copy := *m
+	if options.LayerInfos != nil {
+		if len(copy.LayersDescriptors) != len(options.LayerInfos) {
+			return nil, fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
+		}
+		for i, info := range options.LayerInfos {
+			copy.LayersDescriptors[i].Digest = info.Digest
+			copy.LayersDescriptors[i].Size = info.Size
+		}
+	}
+	return json.Marshal(copy)
+}

View File

@@ -108,6 +108,7 @@ type genericManifest interface {
 	ConfigInfo() types.BlobInfo
 	LayerInfos() []types.BlobInfo
 	ImageInspectInfo() (*types.ImageInspectInfo, error) // The caller will need to fill in Layers
+	UpdatedManifest(types.ManifestUpdateOptions) ([]byte, error)
 }
 
 // getParsedManifest parses the manifest into a data structure, cleans it up, and returns it.
@@ -173,3 +174,13 @@ func (i *genericImage) LayerInfos() ([]types.BlobInfo, error) {
 	}
 	return m.LayerInfos(), nil
 }
+
+// UpdatedManifest returns the image's manifest modified according to options.
+// This does not change the state of the Image object.
+func (i *genericImage) UpdatedManifest(options types.ManifestUpdateOptions) ([]byte, error) {
+	m, err := i.getParsedManifest()
+	if err != nil {
+		return nil, err
+	}
+	return m.UpdatedManifest(options)
+}

View File

@@ -95,3 +95,21 @@ func MatchesDigest(manifest []byte, expectedDigest string) (bool, error) {
 	}
 	return expectedDigest == actualDigest, nil
 }
+
+// AddDummyV2S1Signature adds a JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
+// This is useful to make the manifest acceptable to a Docker Registry (even though nothing needs or wants the JWS signature).
+func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
+	key, err := libtrust.GenerateECP256PrivateKey()
+	if err != nil {
+		return nil, err // Coverage: This can fail only if rand.Reader fails.
+	}
+
+	js, err := libtrust.NewJSONSignature(manifest)
+	if err != nil {
+		return nil, err
+	}
+	if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails.
+		return nil, err
+	}
+	return js.PrettySignature("signatures")
+}
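
Aside (not part of the diff): the dummy signature is a cryptographically valid JWS made with a freshly generated, immediately discarded key, so a registry can verify it even though it attests to nothing. A hypothetical caller, assuming the import paths used in this repository and libtrust's ParsePrettySignature/Verify helpers to round-trip the result:

package main

import (
	"fmt"

	"github.com/containers/image/manifest"
	"github.com/docker/libtrust"
)

func main() {
	unsigned := []byte(`{"schemaVersion": 1, "fsLayers": [], "history": []}`)
	signed, err := manifest.AddDummyV2S1Signature(unsigned)
	if err != nil {
		panic(err)
	}
	// Round-trip: the JWS parses and verifies against its embedded ephemeral key.
	js, err := libtrust.ParsePrettySignature(signed, "signatures")
	if err != nil {
		panic(err)
	}
	keys, err := js.Verify()
	if err != nil {
		panic(err)
	}
	fmt.Printf("verified with %d ephemeral key(s)\n", len(keys))
}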

View File

@@ -49,6 +49,11 @@ func (d *ociImageDestination) SupportsSignatures() error {
 	return fmt.Errorf("Pushing signatures for OCI images is not supported")
 }
 
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *ociImageDestination) ShouldCompressLayers() bool {
+	return false
+}
+
 // PutBlob writes contents of stream and returns data representing the result (with all data filled in).
 // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
 // inputInfo.Size is the expected length of stream, if known.

View File

@@ -337,6 +337,11 @@ func (d *openshiftImageDestination) SupportsSignatures() error {
 	return nil
 }
 
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *openshiftImageDestination) ShouldCompressLayers() bool {
+	return true
+}
+
 // PutBlob writes contents of stream and returns data representing the result (with all data filled in).
 // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
 // inputInfo.Size is the expected length of stream, if known.

View File

@@ -133,6 +133,8 @@ type ImageDestination interface {
 	// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
 	// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
 	SupportsSignatures() error
+	// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+	ShouldCompressLayers() bool
 
 	// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
 	// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
@@ -175,6 +177,14 @@ type Image interface {
 	LayerInfos() ([]BlobInfo, error)
 	// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
 	Inspect() (*ImageInspectInfo, error)
+	// UpdatedManifest returns the image's manifest modified according to options.
+	// This does not change the state of the Image object.
+	UpdatedManifest(options ManifestUpdateOptions) ([]byte, error)
 }
+
+// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest
+type ManifestUpdateOptions struct {
+	LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which should replace the originals, in order (the root layer first, and then successive layered layers)
+}
 
 // ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration.
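
Aside (not part of the diff): pulling the pieces together, the per-layer decision in copyBlob reduces to three gates — the manifest must be modifiable (no signatures to preserve), the stream must not already be compressed, and the destination must want compression. A minimal restatement (function and variable names are illustrative):

package main

import "fmt"

// shouldCompress restates copyBlob's condition
// (!canCompress || isCompressed || !dest.ShouldCompressLayers()) in positive form.
func shouldCompress(canModifyManifest, isCompressed, destWantsCompression bool) bool {
	return canModifyManifest && !isCompressed && destWantsCompression
}

func main() {
	// A signed image must keep its layer digests, so it is passed through unchanged.
	fmt.Println(shouldCompress(false, false, true)) // false
	// An uncompressed layer headed to a registry is gzipped on the fly.
	fmt.Println(shouldCompress(true, false, true)) // true
	// dir: and oci: destinations report ShouldCompressLayers() == false.
	fmt.Println(shouldCompress(true, false, false)) // false
}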