vendor: update c/i@fb36437e0f

This change includes the docker-archive: transport, allowing for entirely local manipulation of Docker images.

Signed-off-by: Aleksa Sarai <asarai@suse.de>
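As a rough illustration of what the new transport enables, here is a minimal, hypothetical sketch of a consumer of the vendored package (the archive paths and the busybox name are made up; the calls are the ParseReference, NewImageSource and NewImageDestination functions added in the files below):

package main

import (
	"fmt"

	"github.com/containers/image/docker/archive"
)

func main() {
	// Reading: only the path matters; a :name:tag suffix is ignored for sources.
	srcRef, err := archive.ParseReference("image.tar") // hypothetical path
	if err != nil {
		panic(err)
	}
	src, err := srcRef.NewImageSource(nil, nil)
	if err != nil {
		panic(err)
	}
	defer src.Close()

	// Writing: the <path>:<name:tag> form is mandatory, so the archive's
	// RepoTags entry can be filled in; the target file must not exist yet.
	destRef, err := archive.ParseReference("new.tar:busybox:latest") // hypothetical
	if err != nil {
		panic(err)
	}
	dest, err := destRef.NewImageDestination(nil)
	if err != nil {
		panic(err)
	}
	defer dest.Close()

	fmt.Println(srcRef.StringWithinTransport(), "->", destRef.StringWithinTransport())
}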
vendor/github.com/containers/image/docker/archive/dest.go (generated, vendored; new file, 57 lines)
@@ -0,0 +1,57 @@
+package archive
+
+import (
+	"io"
+	"os"
+
+	"github.com/containers/image/docker/tarfile"
+	"github.com/containers/image/types"
+	"github.com/pkg/errors"
+)
+
+type archiveImageDestination struct {
+	*tarfile.Destination // Implements most of types.ImageDestination
+	ref    archiveReference
+	writer io.Closer
+}
+
+func newImageDestination(ctx *types.SystemContext, ref archiveReference) (types.ImageDestination, error) {
+	if ref.destinationRef == nil {
+		return nil, errors.Errorf("docker-archive: destination reference not supplied (must be of form <path>:<reference:tag>)")
+	}
+	fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_EXCL|os.O_CREATE, 0644)
+	if err != nil {
+		// FIXME: It should be possible to modify archives, but the only really
+		//        sane way of doing it is to create a copy of the image, modify
+		//        it and then do a rename(2).
+		if os.IsExist(err) {
+			err = errors.New("docker-archive doesn't support modifying existing images")
+		}
+		return nil, err
+	}
+
+	return &archiveImageDestination{
+		Destination: tarfile.NewDestination(fh, ref.destinationRef),
+		ref:         ref,
+		writer:      fh,
+	}, nil
+}
+
+// Reference returns the reference used to set up this destination.  Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *archiveImageDestination) Reference() types.ImageReference {
+	return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *archiveImageDestination) Close() error {
+	return d.writer.Close()
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *archiveImageDestination) Commit() error {
+	return d.Destination.Commit()
+}
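The FIXME in newImageDestination sketches the intended way to eventually support modifying an existing archive: rebuild it in a copy, then swap it in with rename(2). A minimal, hypothetical sketch of that copy-and-rename pattern, not part of the vendored code:

package main

import (
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

// replaceAtomically rebuilds the file at path in a temporary file in the
// same directory and then rename(2)s it into place; within one filesystem
// the rename is atomic, so readers observe either the old archive or the
// complete new one, never a partial write.
func replaceAtomically(path string, build func(io.Writer) error) error {
	tmp, err := ioutil.TempFile(filepath.Dir(path), ".tmp-archive-")
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // a no-op once the rename has succeeded
	if err := build(tmp); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), path)
}

func main() {
	err := replaceAtomically("image.tar", func(w io.Writer) error { // hypothetical path
		_, err := w.Write([]byte("rebuilt archive contents"))
		return err
	})
	if err != nil {
		panic(err)
	}
}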
vendor/github.com/containers/image/docker/archive/src.go (generated, vendored; new file, 36 lines)
@@ -0,0 +1,36 @@
+package archive
+
+import (
+	"github.com/Sirupsen/logrus"
+	"github.com/containers/image/docker/tarfile"
+	"github.com/containers/image/types"
+)
+
+type archiveImageSource struct {
+	*tarfile.Source // Implements most of types.ImageSource
+	ref archiveReference
+}
+
+// newImageSource returns a types.ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(ctx *types.SystemContext, ref archiveReference) types.ImageSource {
+	if ref.destinationRef != nil {
+		logrus.Warnf("docker-archive: references are not supported for sources (ignoring)")
+	}
+	src := tarfile.NewSource(ref.path)
+	return &archiveImageSource{
+		Source: src,
+		ref:    ref,
+	}
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *archiveImageSource) Reference() types.ImageReference {
+	return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *archiveImageSource) Close() error {
+	return nil
+}
vendor/github.com/containers/image/docker/archive/transport.go (generated, vendored; new file, 155 lines)
@@ -0,0 +1,155 @@
+package archive
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/containers/image/docker/reference"
+	ctrImage "github.com/containers/image/image"
+	"github.com/containers/image/transports"
+	"github.com/containers/image/types"
+	"github.com/pkg/errors"
+)
+
+func init() {
+	transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for local Docker archives.
+var Transport = archiveTransport{}
+
+type archiveTransport struct{}
+
+func (t archiveTransport) Name() string {
+	return "docker-archive"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t archiveTransport) ParseReference(reference string) (types.ImageReference, error) {
+	return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
+	// See the explanation in archiveReference.PolicyConfigurationIdentity.
+	return errors.New(`docker-archive: does not support any scopes except the default "" one`)
+}
+
+// archiveReference is an ImageReference for Docker images.
+type archiveReference struct {
+	destinationRef reference.NamedTagged // only used for destinations
+	path           string
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
+func ParseReference(refString string) (types.ImageReference, error) {
+	if refString == "" {
+		return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
+	}
+
+	parts := strings.SplitN(refString, ":", 2)
+	path := parts[0]
+	var destinationRef reference.NamedTagged
+
+	// A :tag was specified, which is only necessary for destinations.
+	if len(parts) == 2 {
+		ref, err := reference.ParseNormalizedNamed(parts[1])
+		if err != nil {
+			return nil, errors.Wrapf(err, "docker-archive parsing reference")
+		}
+		ref = reference.TagNameOnly(ref)
+
+		if _, isDigest := ref.(reference.Canonical); isDigest {
+			return nil, errors.Errorf("docker-archive doesn't support digest references: %s", refString)
+		}
+
+		refTagged, isTagged := ref.(reference.NamedTagged)
+		if !isTagged {
+			// Really shouldn't be hit...
+			return nil, errors.Errorf("internal error: reference is not tagged even after reference.TagNameOnly: %s", refString)
+		}
+		destinationRef = refTagged
+	}
+
+	return archiveReference{
+		destinationRef: destinationRef,
+		path:           path,
+	}, nil
+}
+
+func (ref archiveReference) Transport() types.ImageTransport {
+	return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref archiveReference) StringWithinTransport() string {
+	if ref.destinationRef == nil {
+		return ref.path
+	}
+	return fmt.Sprintf("%s:%s", ref.path, ref.destinationRef.String())
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref archiveReference) DockerReference() reference.Named {
+	return ref.destinationRef
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref archiveReference) PolicyConfigurationIdentity() string {
+	// Punt, the justification is similar to dockerReference.PolicyConfigurationIdentity.
+	return ""
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref archiveReference) PolicyConfigurationNamespaces() []string {
+	// TODO
+	return []string{}
+}
+
+// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned Image.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+	src := newImageSource(ctx, ref)
+	return ctrImage.FromSource(src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference,
+// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
+// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
+// The caller must call .Close() on the returned ImageSource.
+func (ref archiveReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
+	return newImageSource(ctx, ref), nil
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref archiveReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
+	return newImageDestination(ctx, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref archiveReference) DeleteImage(ctx *types.SystemContext) error {
+	// Not really supported, for safety reasons.
+	return errors.New("Deleting images not implemented for docker-archive: images")
+}
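Because ParseReference pushes the name part through reference.ParseNormalizedNamed and reference.TagNameOnly, a bare destination name is expanded to a fully qualified, tagged reference before it is stored. A small sketch of the resulting normalization (the archive path out.tar is illustrative):

package main

import (
	"fmt"

	"github.com/containers/image/docker/archive"
)

func main() {
	// "busybox" has no registry, repository path, or tag; normalization
	// fills in all three defaults before the reference is accepted.
	ref, err := archive.ParseReference("out.tar:busybox")
	if err != nil {
		panic(err)
	}
	// Expected: out.tar:docker.io/library/busybox:latest
	fmt.Println(ref.StringWithinTransport())
}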
vendor/github.com/containers/image/docker/daemon/daemon_dest.go (generated, vendored; 212 changed lines)
@@ -1,36 +1,26 @@
 package daemon
 
 import (
-	"archive/tar"
-	"bytes"
-	"encoding/json"
-	"fmt"
 	"io"
-	"io/ioutil"
-	"os"
-	"time"
 
 	"github.com/Sirupsen/logrus"
 	"github.com/containers/image/docker/reference"
-	"github.com/containers/image/manifest"
+	"github.com/containers/image/docker/tarfile"
 	"github.com/containers/image/types"
 	"github.com/docker/docker/client"
-	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 )
 
 type daemonImageDestination struct {
 	ref daemonReference
-	namedTaggedRef reference.NamedTagged // Strictly speaking redundant with ref above; having the field makes it structurally impossible for later users to fail.
+	*tarfile.Destination // Implements most of types.ImageDestination
 	// For talking to imageLoadGoroutine
 	goroutineCancel context.CancelFunc
 	statusChannel   <-chan error
 	writer          *io.PipeWriter
-	tar             *tar.Writer
 	// Other state
 	committed bool // writer has been closed
-	blobs     map[digest.Digest]types.BlobInfo // list of already-sent blobs
 }
 
 // newImageDestination returns a types.ImageDestination for the specified image reference.
@@ -57,13 +47,11 @@ func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (t
 
 	return &daemonImageDestination{
 		ref: ref,
-		namedTaggedRef: namedTaggedRef,
+		Destination: tarfile.NewDestination(writer, namedTaggedRef),
 		goroutineCancel: goroutineCancel,
 		statusChannel:   statusChannel,
 		writer:          writer,
-		tar:             tar.NewWriter(writer),
 		committed: false,
-		blobs: make(map[digest.Digest]types.BlobInfo),
 	}, nil
 }
 
@@ -115,205 +103,13 @@ func (d *daemonImageDestination) Reference() types.ImageReference {
 	return d.ref
 }
 
-// SupportedManifestMIMETypes tells which manifest mime types the destination supports
-// If an empty slice or nil it's returned, then any mime type can be tried to upload
-func (d *daemonImageDestination) SupportedManifestMIMETypes() []string {
-	return []string{
-		manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
-	}
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (d *daemonImageDestination) SupportsSignatures() error {
-	return errors.Errorf("Storing signatures for docker-daemon: destinations is not supported")
-}
-
-// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
-func (d *daemonImageDestination) ShouldCompressLayers() bool {
-	return false
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
-// uploaded to the image destination, true otherwise.
-func (d *daemonImageDestination) AcceptsForeignLayerURLs() bool {
-	return false
-}
-
-// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
-// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
-// inputInfo.Size is the expected length of stream, if known.
-// WARNING: The contents of stream are being verified on the fly.  Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (d *daemonImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
-	if inputInfo.Digest.String() == "" {
-		return types.BlobInfo{}, errors.Errorf(`Can not stream a blob with unknown digest to "docker-daemon:"`)
-	}
-
-	ok, size, err := d.HasBlob(inputInfo)
-	if err != nil {
-		return types.BlobInfo{}, err
-	}
-	if ok {
-		return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
-	}
-
-	if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size.
-		logrus.Debugf("docker-daemon: input with unknown size, streaming to disk first…")
-		streamCopy, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-daemon-blob")
-		if err != nil {
-			return types.BlobInfo{}, err
-		}
-		defer os.Remove(streamCopy.Name())
-		defer streamCopy.Close()
-
-		size, err := io.Copy(streamCopy, stream)
-		if err != nil {
-			return types.BlobInfo{}, err
-		}
-		_, err = streamCopy.Seek(0, os.SEEK_SET)
-		if err != nil {
-			return types.BlobInfo{}, err
-		}
-		inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
-		stream = streamCopy
-		logrus.Debugf("… streaming done")
-	}
-
-	digester := digest.Canonical.Digester()
-	tee := io.TeeReader(stream, digester.Hash())
-	if err := d.sendFile(inputInfo.Digest.String(), inputInfo.Size, tee); err != nil {
-		return types.BlobInfo{}, err
-	}
-	d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}
-	return types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}, nil
-}
-
-// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
-// Unlike PutBlob, the digest can not be empty.  If HasBlob returns true, the size of the blob must also be returned.
-// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
-// it returns a non-nil error only on an unexpected failure.
-func (d *daemonImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
-	if info.Digest == "" {
-		return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
-	}
-	if blob, ok := d.blobs[info.Digest]; ok {
-		return true, blob.Size, nil
-	}
-	return false, -1, nil
-}
-
-func (d *daemonImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
-	return info, nil
-}
-
-func (d *daemonImageDestination) PutManifest(m []byte) error {
-	var man schema2Manifest
-	if err := json.Unmarshal(m, &man); err != nil {
-		return errors.Wrap(err, "Error parsing manifest")
-	}
-	if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
-		return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
-	}
-
-	layerPaths := []string{}
-	for _, l := range man.Layers {
-		layerPaths = append(layerPaths, l.Digest.String())
-	}
-
-	// For github.com/docker/docker consumers, this works just as well as
-	//   refString := d.namedTaggedRef.String()  [i.e. d.ref.ref.String()]
-	// because when reading the RepoTags strings, github.com/docker/docker/reference
-	// normalizes both of them to the same value.
-	//
-	// Doing it this way to include the normalized-out `docker.io[/library]` does make
-	// a difference for github.com/projectatomic/docker consumers, with the
-	// “Add --add-registry and --block-registry options to docker daemon” patch.
-	// These consumers treat reference strings which include a hostname and reference
-	// strings without a hostname differently.
-	//
-	// Using the host name here is more explicit about the intent, and it has the same
-	// effect as (docker pull) in projectatomic/docker, which tags the result using
-	// a hostname-qualified reference.
-	// See https://github.com/containers/image/issues/72 for a more detailed
-	// analysis and explanation.
-	refString := fmt.Sprintf("%s:%s", d.namedTaggedRef.Name(), d.namedTaggedRef.Tag())
-
-	items := []manifestItem{{
-		Config:       man.Config.Digest.String(),
-		RepoTags:     []string{refString},
-		Layers:       layerPaths,
-		Parent:       "",
-		LayerSources: nil,
-	}}
-	itemsBytes, err := json.Marshal(&items)
-	if err != nil {
-		return err
-	}
-
-	// FIXME? Do we also need to support the legacy format?
-	return d.sendFile(manifestFileName, int64(len(itemsBytes)), bytes.NewReader(itemsBytes))
-}
-
-type tarFI struct {
-	path string
-	size int64
-}
-
-func (t *tarFI) Name() string {
-	return t.path
-}
-func (t *tarFI) Size() int64 {
-	return t.size
-}
-func (t *tarFI) Mode() os.FileMode {
-	return 0444
-}
-func (t *tarFI) ModTime() time.Time {
-	return time.Unix(0, 0)
-}
-func (t *tarFI) IsDir() bool {
-	return false
-}
-func (t *tarFI) Sys() interface{} {
-	return nil
-}
-
-// sendFile sends a file into the tar stream.
-func (d *daemonImageDestination) sendFile(path string, expectedSize int64, stream io.Reader) error {
-	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
-	if err != nil {
-		return nil
-	}
-	logrus.Debugf("Sending as tar file %s", path)
-	if err := d.tar.WriteHeader(hdr); err != nil {
-		return err
-	}
-	size, err := io.Copy(d.tar, stream)
-	if err != nil {
-		return err
-	}
-	if size != expectedSize {
-		return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
-	}
-	return nil
-}
-
-func (d *daemonImageDestination) PutSignatures(signatures [][]byte) error {
-	if len(signatures) != 0 {
-		return errors.Errorf("Storing signatures for docker-daemon: destinations is not supported")
-	}
-	return nil
-}
-
 // Commit marks the process of storing the image as successful and asks for the image to be persisted.
 // WARNING: This does not have any transactional semantics:
 // - Uploaded data MAY be visible to others before Commit() is called
 // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
 func (d *daemonImageDestination) Commit() error {
 	logrus.Debugf("docker-daemon: Closing tar stream")
-	if err := d.tar.Close(); err != nil {
+	if err := d.Destination.Commit(); err != nil {
 		return err
 	}
 	if err := d.writer.Close(); err != nil {
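The effect of this refactor is that daemonImageDestination stops carrying its own PutBlob/PutManifest machinery and instead picks those methods up from the embedded *tarfile.Destination through Go's method promotion. A minimal, self-contained illustration of that embedding pattern (the Writer and Shared types are stand-ins, not the real containers/image interfaces):

package main

import "fmt"

// Writer is a stand-in for the large types.ImageDestination interface.
type Writer interface {
	PutBlob(data []byte) error
}

// Shared provides the common implementation, like tarfile.Destination.
type Shared struct{}

func (s *Shared) PutBlob(data []byte) error {
	fmt.Printf("wrote %d bytes\n", len(data))
	return nil
}

// Daemon embeds *Shared; PutBlob is promoted, so Daemon satisfies Writer
// without restating the method, which is how daemonImageDestination now
// reuses tarfile.Destination.
type Daemon struct {
	*Shared
	// daemon-specific state would live here
}

func main() {
	var w Writer = &Daemon{Shared: &Shared{}}
	_ = w.PutBlob([]byte("layer"))
}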
vendor/github.com/containers/image/docker/daemon/daemon_src.go (generated, vendored; 325 changed lines)
@@ -1,19 +1,13 @@
 package daemon
 
 import (
-	"archive/tar"
-	"bytes"
-	"encoding/json"
 	"io"
 	"io/ioutil"
 	"os"
-	"path"
 
-	"github.com/containers/image/manifest"
-	"github.com/containers/image/pkg/compression"
+	"github.com/containers/image/docker/tarfile"
 	"github.com/containers/image/types"
 	"github.com/docker/docker/client"
-	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 )
@@ -22,15 +16,8 @@ const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system defaul
 
 type daemonImageSource struct {
 	ref daemonReference
+	*tarfile.Source // Implements most of types.ImageSource
 	tarCopyPath string
-	// The following data is only available after ensureCachedDataIsPresent() succeeds
-	tarManifest *manifestItem // nil if not available yet.
-	configBytes []byte
-	configDigest digest.Digest
-	orderedDiffIDList []diffID
-	knownLayers map[diffID]*layerInfo
-	// Other state
-	generatedManifest []byte // Private cache for GetManifest(), nil if not set yet.
 }
 
 type layerInfo struct {
@@ -81,6 +68,7 @@ func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageS
 	succeeded = true
 	return &daemonImageSource{
 		ref: ref,
+		Source: tarfile.NewSource(tarCopyFile.Name()),
 		tarCopyPath: tarCopyFile.Name(),
 	}, nil
 }
@@ -95,310 +83,3 @@ func (s *daemonImageSource) Reference() types.ImageReference {
 func (s *daemonImageSource) Close() error {
 	return os.Remove(s.tarCopyPath)
 }
-
-// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
-type tarReadCloser struct {
-	*tar.Reader
-	backingFile *os.File
-}
-
-func (t *tarReadCloser) Close() error {
-	return t.backingFile.Close()
-}
-
-// openTarComponent returns a ReadCloser for the specific file within the archive.
-// This is linear scan; we assume that the tar file will have a fairly small amount of files (~layers),
-// and that filesystem caching will make the repeated seeking over the (uncompressed) tarCopyPath cheap enough.
-// The caller should call .Close() on the returned stream.
-func (s *daemonImageSource) openTarComponent(componentPath string) (io.ReadCloser, error) {
-	f, err := os.Open(s.tarCopyPath)
-	if err != nil {
-		return nil, err
-	}
-	succeeded := false
-	defer func() {
-		if !succeeded {
-			f.Close()
-		}
-	}()
-
-	tarReader, header, err := findTarComponent(f, componentPath)
-	if err != nil {
-		return nil, err
-	}
-	if header == nil {
-		return nil, os.ErrNotExist
-	}
-	if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
-		// We follow only one symlink; so no loops are possible.
-		if _, err := f.Seek(0, os.SEEK_SET); err != nil {
-			return nil, err
-		}
-		// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
-		// so we don't care.
-		tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
-		if err != nil {
-			return nil, err
-		}
-		if header == nil {
-			return nil, os.ErrNotExist
-		}
-	}
-
-	if !header.FileInfo().Mode().IsRegular() {
-		return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
-	}
-	succeeded = true
-	return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
-}
-
-// findTarComponent returns a header and a reader matching path within inputFile,
-// or (nil, nil, nil) if not found.
-func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) {
-	t := tar.NewReader(inputFile)
-	for {
-		h, err := t.Next()
-		if err == io.EOF {
-			break
-		}
-		if err != nil {
-			return nil, nil, err
-		}
-		if h.Name == path {
-			return t, h, nil
-		}
-	}
-	return nil, nil, nil
-}
-
-// readTarComponent returns full contents of componentPath.
-func (s *daemonImageSource) readTarComponent(path string) ([]byte, error) {
-	file, err := s.openTarComponent(path)
-	if err != nil {
-		return nil, errors.Wrapf(err, "Error loading tar component %s", path)
-	}
-	defer file.Close()
-	bytes, err := ioutil.ReadAll(file)
-	if err != nil {
-		return nil, err
-	}
-	return bytes, nil
-}
-
-// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
-func (s *daemonImageSource) ensureCachedDataIsPresent() error {
-	if s.tarManifest != nil {
-		return nil
-	}
-
-	// Read and parse manifest.json
-	tarManifest, err := s.loadTarManifest()
-	if err != nil {
-		return err
-	}
-
-	// Read and parse config.
-	configBytes, err := s.readTarComponent(tarManifest.Config)
-	if err != nil {
-		return err
-	}
-	var parsedConfig dockerImage // Most fields ommitted, we only care about layer DiffIDs.
-	if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
-		return errors.Wrapf(err, "Error decoding tar config %s", tarManifest.Config)
-	}
-
-	knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
-	if err != nil {
-		return err
-	}
-
-	// Success; commit.
-	s.tarManifest = tarManifest
-	s.configBytes = configBytes
-	s.configDigest = digest.FromBytes(configBytes)
-	s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
-	s.knownLayers = knownLayers
-	return nil
-}
-
-// loadTarManifest loads and decodes the manifest.json.
-func (s *daemonImageSource) loadTarManifest() (*manifestItem, error) {
-	// FIXME? Do we need to deal with the legacy format?
-	bytes, err := s.readTarComponent(manifestFileName)
-	if err != nil {
-		return nil, err
-	}
-	var items []manifestItem
-	if err := json.Unmarshal(bytes, &items); err != nil {
-		return nil, errors.Wrap(err, "Error decoding tar manifest.json")
-	}
-	if len(items) != 1 {
-		return nil, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(items))
-	}
-	return &items[0], nil
-}
-
-func (s *daemonImageSource) prepareLayerData(tarManifest *manifestItem, parsedConfig *dockerImage) (map[diffID]*layerInfo, error) {
-	// Collect layer data available in manifest and config.
-	if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
-		return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
-	}
-	knownLayers := map[diffID]*layerInfo{}
-	unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
-	for i, diffID := range parsedConfig.RootFS.DiffIDs {
-		if _, ok := knownLayers[diffID]; ok {
-			// Apparently it really can happen that a single image contains the same layer diff more than once.
-			// In that case, the diffID validation ensures that both layers truly are the same, and it should not matter
-			// which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original.
-			continue
-		}
-		layerPath := tarManifest.Layers[i]
-		if _, ok := unknownLayerSizes[layerPath]; ok {
-			return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
-		}
-		li := &layerInfo{ // A new element in each iteration
-			path: layerPath,
-			size: -1,
-		}
-		knownLayers[diffID] = li
-		unknownLayerSizes[layerPath] = li
-	}
-
-	// Scan the tar file to collect layer sizes.
-	file, err := os.Open(s.tarCopyPath)
-	if err != nil {
-		return nil, err
-	}
-	defer file.Close()
-	t := tar.NewReader(file)
-	for {
-		h, err := t.Next()
-		if err == io.EOF {
-			break
-		}
-		if err != nil {
-			return nil, err
-		}
-		if li, ok := unknownLayerSizes[h.Name]; ok {
-			li.size = h.Size
-			delete(unknownLayerSizes, h.Name)
-		}
-	}
-	if len(unknownLayerSizes) != 0 {
-		return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice.
-	}
-
-	return knownLayers, nil
-}
-
-// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
-// It may use a remote (= slow) service.
-func (s *daemonImageSource) GetManifest() ([]byte, string, error) {
-	if s.generatedManifest == nil {
-		if err := s.ensureCachedDataIsPresent(); err != nil {
-			return nil, "", err
-		}
-		m := schema2Manifest{
-			SchemaVersion: 2,
-			MediaType:     manifest.DockerV2Schema2MediaType,
-			Config: distributionDescriptor{
-				MediaType: manifest.DockerV2Schema2ConfigMediaType,
-				Size:      int64(len(s.configBytes)),
-				Digest:    s.configDigest,
-			},
-			Layers: []distributionDescriptor{},
-		}
-		for _, diffID := range s.orderedDiffIDList {
-			li, ok := s.knownLayers[diffID]
-			if !ok {
-				return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
-			}
-			m.Layers = append(m.Layers, distributionDescriptor{
-				Digest:    digest.Digest(diffID), // diffID is a digest of the uncompressed tarball
-				MediaType: manifest.DockerV2Schema2LayerMediaType,
-				Size:      li.size,
-			})
-		}
-		manifestBytes, err := json.Marshal(&m)
-		if err != nil {
-			return nil, "", err
-		}
-		s.generatedManifest = manifestBytes
-	}
-	return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
-}
-
-// GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest
-// out of a manifest list.
-func (s *daemonImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
-	// How did we even get here? GetManifest() above has returned a manifest.DockerV2Schema2MediaType.
-	return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
-}
-
-type readCloseWrapper struct {
-	io.Reader
-	closeFunc func() error
-}
-
-func (r readCloseWrapper) Close() error {
-	if r.closeFunc != nil {
-		return r.closeFunc()
-	}
-	return nil
-}
-
-// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
-func (s *daemonImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
-	if err := s.ensureCachedDataIsPresent(); err != nil {
-		return nil, 0, err
-	}
-
-	if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
-		return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
-	}
-
-	if li, ok := s.knownLayers[diffID(info.Digest)]; ok { // diffID is a digest of the uncompressed tarball,
-		stream, err := s.openTarComponent(li.path)
-		if err != nil {
-			return nil, 0, err
-		}
-
-		// In order to handle the fact that digests != diffIDs (and thus that a
-		// caller which is trying to verify the blob will run into problems),
-		// we need to decompress blobs. This is a bit ugly, but it's a
-		// consequence of making everything addressable by their DiffID rather
-		// than by their digest...
-		//
-		// In particular, because the v2s2 manifest being generated uses
-		// DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of
-		// layers not their _actual_ digest. The result is that copy/... will
-		// be verifing a "digest" which is not the actual layer's digest (but
-		// is instead the DiffID).
-
-		decompressFunc, reader, err := compression.DetectCompression(stream)
-		if err != nil {
-			return nil, 0, errors.Wrapf(err, "Detecting compression in blob %s", info.Digest)
-		}
-
-		if decompressFunc != nil {
-			reader, err = decompressFunc(reader)
-			if err != nil {
-				return nil, 0, errors.Wrapf(err, "Decompressing blob %s stream", info.Digest)
-			}
-		}
-
-		newStream := readCloseWrapper{
-			Reader:    reader,
-			closeFunc: stream.Close,
-		}
-
-		return newStream, li.size, nil
-	}
-
-	return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
-}
-
-// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
-func (s *daemonImageSource) GetSignatures() ([][]byte, error) {
-	return [][]byte{}, nil
-}
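The GetBlob code removed here (re-homed in the tarfile package) decompresses layers transparently so that callers verifying by DiffID see the uncompressed bytes; compression.DetectCompression hands back a decompressor only when the stream is actually compressed. A short sketch of that idiom using the same vendored API (the input string stands in for a layer stream):

package main

import (
	"io"
	"io/ioutil"
	"os"
	"strings"

	"github.com/containers/image/pkg/compression"
)

func main() {
	// A plain-text "blob"; DetectCompression returns a nil decompressor
	// for it, while a gzipped stream would get a non-nil one.
	var stream io.Reader = strings.NewReader("uncompressed layer data")

	decompressFunc, reader, err := compression.DetectCompression(stream)
	if err != nil {
		panic(err)
	}
	if decompressFunc != nil { // only wrap when the blob was compressed
		if reader, err = decompressFunc(reader); err != nil {
			panic(err)
		}
	}
	data, err := ioutil.ReadAll(reader)
	if err != nil {
		panic(err)
	}
	os.Stdout.Write(data)
}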
vendor/github.com/containers/image/docker/tarfile/dest.go (generated, vendored; new file, 250 lines)
@@ -0,0 +1,250 @@
+package tarfile
+
+import (
+	"archive/tar"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"time"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/containers/image/docker/reference"
+	"github.com/containers/image/manifest"
+	"github.com/containers/image/types"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
+
+// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
+type Destination struct {
+	writer  io.Writer
+	tar     *tar.Writer
+	repoTag string
+	// Other state.
+	blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
+}
+
+// NewDestination returns a tarfile.Destination for the specified io.Writer.
+func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination {
+	// For github.com/docker/docker consumers, this works just as well as
+	//   refString := ref.String()
+	// because when reading the RepoTags strings, github.com/docker/docker/reference
+	// normalizes both of them to the same value.
+	//
+	// Doing it this way to include the normalized-out `docker.io[/library]` does make
+	// a difference for github.com/projectatomic/docker consumers, with the
+	// “Add --add-registry and --block-registry options to docker daemon” patch.
+	// These consumers treat reference strings which include a hostname and reference
+	// strings without a hostname differently.
+	//
+	// Using the host name here is more explicit about the intent, and it has the same
+	// effect as (docker pull) in projectatomic/docker, which tags the result using
+	// a hostname-qualified reference.
+	// See https://github.com/containers/image/issues/72 for a more detailed
+	// analysis and explanation.
+	refString := fmt.Sprintf("%s:%s", ref.Name(), ref.Tag())
+	return &Destination{
+		writer:  dest,
+		tar:     tar.NewWriter(dest),
+		repoTag: refString,
+		blobs:   make(map[digest.Digest]types.BlobInfo),
+	}
+}
+
+// SupportedManifestMIMETypes tells which manifest mime types the destination supports
+// If an empty slice or nil it's returned, then any mime type can be tried to upload
+func (d *Destination) SupportedManifestMIMETypes() []string {
+	return []string{
+		manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
+	}
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *Destination) SupportsSignatures() error {
+	return errors.Errorf("Storing signatures for docker tar files is not supported")
+}
+
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *Destination) ShouldCompressLayers() bool {
+	return false
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *Destination) AcceptsForeignLayerURLs() bool {
+	return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// WARNING: The contents of stream are being verified on the fly.  Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *Destination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
+	if inputInfo.Digest.String() == "" {
+		return types.BlobInfo{}, errors.Errorf("Can not stream a blob with unknown digest to docker tarfile")
+	}
+
+	ok, size, err := d.HasBlob(inputInfo)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	if ok {
+		return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
+	}
+
+	if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size.
+		logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
+		streamCopy, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-tarfile-blob")
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		defer os.Remove(streamCopy.Name())
+		defer streamCopy.Close()
+
+		size, err := io.Copy(streamCopy, stream)
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		_, err = streamCopy.Seek(0, os.SEEK_SET)
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
+		stream = streamCopy
+		logrus.Debugf("... streaming done")
+	}
+
+	digester := digest.Canonical.Digester()
+	tee := io.TeeReader(stream, digester.Hash())
+	if err := d.sendFile(inputInfo.Digest.String(), inputInfo.Size, tee); err != nil {
+		return types.BlobInfo{}, err
+	}
+	d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}
+	return types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}, nil
+}
+
+// HasBlob returns true iff the image destination already contains a blob with
+// the matching digest which can be reapplied using ReapplyBlob.  Unlike
+// PutBlob, the digest can not be empty.  If HasBlob returns true, the size of
+// the blob must also be returned.  If the destination does not contain the
+// blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); it
+// returns a non-nil error only on an unexpected failure.
+func (d *Destination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+	if info.Digest == "" {
+		return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
+	}
+	if blob, ok := d.blobs[info.Digest]; ok {
+		return true, blob.Size, nil
+	}
+	return false, -1, nil
+}
+
+// ReapplyBlob informs the image destination that a blob for which HasBlob
+// previously returned true would have been passed to PutBlob if it had
+// returned false.  Like HasBlob and unlike PutBlob, the digest can not be
+// empty.  If the blob is a filesystem layer, this signifies that the changes
+// it describes need to be applied again when composing a filesystem tree.
+func (d *Destination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+	return info, nil
+}
+
+// PutManifest sends the given manifest blob to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate
+// between schema versions.
+func (d *Destination) PutManifest(m []byte) error {
+	var man schema2Manifest
+	if err := json.Unmarshal(m, &man); err != nil {
+		return errors.Wrap(err, "Error parsing manifest")
+	}
+	if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
+		return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
+	}
+
+	layerPaths := []string{}
+	for _, l := range man.Layers {
+		layerPaths = append(layerPaths, l.Digest.String())
+	}
+
+	items := []manifestItem{{
+		Config:       man.Config.Digest.String(),
+		RepoTags:     []string{d.repoTag},
+		Layers:       layerPaths,
+		Parent:       "",
+		LayerSources: nil,
+	}}
+	itemsBytes, err := json.Marshal(&items)
+	if err != nil {
+		return err
+	}
+
+	// FIXME? Do we also need to support the legacy format?
+	return d.sendFile(manifestFileName, int64(len(itemsBytes)), bytes.NewReader(itemsBytes))
+}
+
+type tarFI struct {
+	path string
+	size int64
+}
+
+func (t *tarFI) Name() string {
+	return t.path
+}
+func (t *tarFI) Size() int64 {
+	return t.size
+}
+func (t *tarFI) Mode() os.FileMode {
+	return 0444
+}
+func (t *tarFI) ModTime() time.Time {
+	return time.Unix(0, 0)
+}
+func (t *tarFI) IsDir() bool {
+	return false
+}
+func (t *tarFI) Sys() interface{} {
+	return nil
+}
+
+// sendFile sends a file into the tar stream.
+func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader) error {
+	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
+	if err != nil {
+		return nil
+	}
+	logrus.Debugf("Sending as tar file %s", path)
+	if err := d.tar.WriteHeader(hdr); err != nil {
+		return err
+	}
+	size, err := io.Copy(d.tar, stream)
+	if err != nil {
+		return err
+	}
+	if size != expectedSize {
+		return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
+	}
+	return nil
+}
+
+// PutSignatures adds the given signatures to the docker tarfile (currently not
+// supported).  MUST be called after PutManifest (signatures reference manifest
+// contents)
+func (d *Destination) PutSignatures(signatures [][]byte) error {
+	if len(signatures) != 0 {
+		return errors.Errorf("Storing signatures for docker tar files is not supported")
+	}
+	return nil
+}
+
+// Commit finishes writing data to the underlying io.Writer.
+// It is the caller's responsibility to close it, if necessary.
+func (d *Destination) Commit() error {
+	return d.tar.Close()
+}
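sendFile above builds each tar entry from a synthetic os.FileInfo (tarFI) instead of a real file on disk, which is what allows blobs to be streamed straight into the archive. A small standalone sketch of the same trick (the entry name and contents are made up):

package main

import (
	"archive/tar"
	"io"
	"os"
	"strings"
	"time"
)

// memFI is a synthetic os.FileInfo, playing the role of tarFI above.
type memFI struct {
	name string
	size int64
}

func (f *memFI) Name() string       { return f.name }
func (f *memFI) Size() int64        { return f.size }
func (f *memFI) Mode() os.FileMode  { return 0444 }
func (f *memFI) ModTime() time.Time { return time.Unix(0, 0) }
func (f *memFI) IsDir() bool        { return false }
func (f *memFI) Sys() interface{}   { return nil }

func main() {
	contents := "hello"
	tw := tar.NewWriter(os.Stdout)
	// tar.FileInfoHeader only needs an os.FileInfo, so no file has to
	// exist on disk for the entry being written.
	hdr, err := tar.FileInfoHeader(&memFI{name: "greeting.txt", size: int64(len(contents))}, "")
	if err != nil {
		panic(err)
	}
	if err := tw.WriteHeader(hdr); err != nil {
		panic(err)
	}
	if _, err := io.Copy(tw, strings.NewReader(contents)); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}
}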
vendor/github.com/containers/image/docker/tarfile/doc.go (generated, vendored; new file, 3 lines)
@@ -0,0 +1,3 @@
+// Package tarfile is an internal implementation detail of some transports.
+// Do not use outside of the github.com/containers/image repo!
+package tarfile
352
vendor/github.com/containers/image/docker/tarfile/src.go
generated
vendored
Normal file
352
vendor/github.com/containers/image/docker/tarfile/src.go
generated
vendored
Normal file
@@ -0,0 +1,352 @@
|
|||||||
|
package tarfile
|
||||||
|
|
||||||
|
import (
|
||||||
|
"archive/tar"
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
|
||||||
|
"github.com/containers/image/manifest"
|
||||||
|
"github.com/containers/image/pkg/compression"
|
||||||
|
"github.com/containers/image/types"
|
||||||
|
"github.com/opencontainers/go-digest"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Source is a partial implementation of types.ImageSource for reading from tarPath.
|
||||||
|
type Source struct {
|
||||||
|
tarPath string
|
||||||
|
// The following data is only available after ensureCachedDataIsPresent() succeeds
|
||||||
|
tarManifest *manifestItem // nil if not available yet.
|
||||||
|
configBytes []byte
|
||||||
|
configDigest digest.Digest
|
||||||
|
orderedDiffIDList []diffID
|
||||||
|
knownLayers map[diffID]*layerInfo
|
||||||
|
// Other state
|
||||||
|
generatedManifest []byte // Private cache for GetManifest(), nil if not set yet.
|
||||||
|
}
|
||||||
|
|
||||||
|
type layerInfo struct {
|
||||||
|
path string
|
||||||
|
size int64
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSource returns a tarfile.Source for the specified path.
|
||||||
|
func NewSource(path string) *Source {
|
||||||
|
// TODO: We could add support for multiple images in a single archive, so
|
||||||
|
// that people could use docker-archive:opensuse.tar:opensuse:leap as
|
||||||
|
// the source of an image.
|
||||||
|
return &Source{
|
||||||
|
tarPath: path,
|
||||||
|
}
|
||||||
|
}
// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
type tarReadCloser struct {
	*tar.Reader
	backingFile *os.File
}

func (t *tarReadCloser) Close() error {
	return t.backingFile.Close()
}
// openTarComponent returns a ReadCloser for the specific file within the archive.
// This is a linear scan; we assume that the tar file will have a fairly small number of files (~layers),
// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
// The caller should call .Close() on the returned stream.
func (s *Source) openTarComponent(componentPath string) (io.ReadCloser, error) {
	f, err := os.Open(s.tarPath)
	if err != nil {
		return nil, err
	}
	succeeded := false
	defer func() {
		if !succeeded {
			f.Close()
		}
	}()

	tarReader, header, err := findTarComponent(f, componentPath)
	if err != nil {
		return nil, err
	}
	if header == nil {
		return nil, os.ErrNotExist
	}
	if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
		// We follow only one symlink; so no loops are possible.
		if _, err := f.Seek(0, os.SEEK_SET); err != nil {
			return nil, err
		}
		// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
		// so we don't care.
		tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
		if err != nil {
			return nil, err
		}
		if header == nil {
			return nil, os.ErrNotExist
		}
	}

	if !header.FileInfo().Mode().IsRegular() {
		return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
	}
	succeeded = true
	return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
}
// findTarComponent returns a header and a reader matching path within inputFile,
// or (nil, nil, nil) if not found.
func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) {
	t := tar.NewReader(inputFile)
	for {
		h, err := t.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, nil, err
		}
		if h.Name == path {
			return t, h, nil
		}
	}
	return nil, nil, nil
}
// readTarComponent returns full contents of componentPath.
func (s *Source) readTarComponent(path string) ([]byte, error) {
	file, err := s.openTarComponent(path)
	if err != nil {
		return nil, errors.Wrapf(err, "Error loading tar component %s", path)
	}
	defer file.Close()
	bytes, err := ioutil.ReadAll(file)
	if err != nil {
		return nil, err
	}
	return bytes, nil
}
// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
func (s *Source) ensureCachedDataIsPresent() error {
	if s.tarManifest != nil {
		return nil
	}

	// Read and parse manifest.json
	tarManifest, err := s.loadTarManifest()
	if err != nil {
		return err
	}

	// Read and parse config.
	configBytes, err := s.readTarComponent(tarManifest.Config)
	if err != nil {
		return err
	}
	var parsedConfig image // Most fields omitted, we only care about layer DiffIDs.
	if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
		return errors.Wrapf(err, "Error decoding tar config %s", tarManifest.Config)
	}

	knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
	if err != nil {
		return err
	}

	// Success; commit.
	s.tarManifest = tarManifest
	s.configBytes = configBytes
	s.configDigest = digest.FromBytes(configBytes)
	s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
	s.knownLayers = knownLayers
	return nil
}
// loadTarManifest loads and decodes the manifest.json.
func (s *Source) loadTarManifest() (*manifestItem, error) {
	// FIXME? Do we need to deal with the legacy format?
	bytes, err := s.readTarComponent(manifestFileName)
	if err != nil {
		return nil, err
	}
	var items []manifestItem
	if err := json.Unmarshal(bytes, &items); err != nil {
		return nil, errors.Wrap(err, "Error decoding tar manifest.json")
	}
	if len(items) != 1 {
		return nil, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(items))
	}
	return &items[0], nil
}
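For context, docker save writes manifest.json as a JSON array with one entry per image, and this code rejects anything but exactly one entry. The expected input therefore looks roughly like the sketch below; the struct here is a hypothetical local mirror of the unexported manifestItem (only the fields this file actually reads), and the digests in the file names are placeholders:

package main

import (
	"encoding/json"
	"fmt"
)

// item mirrors the manifestItem fields consumed by src.go.
type item struct {
	Config string   // path of the image config JSON inside the tar
	Layers []string // layer tarball paths, in diffID order
}

func main() {
	raw := []byte(`[
	  {
	    "Config": "3e1a...deadbeef.json",
	    "RepoTags": ["busybox:latest"],
	    "Layers": ["abcd.../layer.tar"]
	  }
	]`)
	var items []item
	if err := json.Unmarshal(raw, &items); err != nil {
		panic(err)
	}
	fmt.Println("config:", items[0].Config, "layers:", len(items[0].Layers))
}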
func (s *Source) prepareLayerData(tarManifest *manifestItem, parsedConfig *image) (map[diffID]*layerInfo, error) {
	// Collect layer data available in manifest and config.
	if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
		return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
	}
	knownLayers := map[diffID]*layerInfo{}
	unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
	for i, diffID := range parsedConfig.RootFS.DiffIDs {
		if _, ok := knownLayers[diffID]; ok {
			// Apparently it really can happen that a single image contains the same layer diff more than once.
			// In that case, the diffID validation ensures that both layers truly are the same, and it should not matter
			// which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original.
			continue
		}
		layerPath := tarManifest.Layers[i]
		if _, ok := unknownLayerSizes[layerPath]; ok {
			return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
		}
		li := &layerInfo{ // A new element in each iteration
			path: layerPath,
			size: -1,
		}
		knownLayers[diffID] = li
		unknownLayerSizes[layerPath] = li
	}

	// Scan the tar file to collect layer sizes.
	file, err := os.Open(s.tarPath)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	t := tar.NewReader(file)
	for {
		h, err := t.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}
		if li, ok := unknownLayerSizes[h.Name]; ok {
			li.size = h.Size
			delete(unknownLayerSizes, h.Name)
		}
	}
	if len(unknownLayerSizes) != 0 {
		return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with better error reporting, if it ever happened in practice.
	}

	return knownLayers, nil
}
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
// It may use a remote (= slow) service.
func (s *Source) GetManifest() ([]byte, string, error) {
	if s.generatedManifest == nil {
		if err := s.ensureCachedDataIsPresent(); err != nil {
			return nil, "", err
		}
		m := schema2Manifest{
			SchemaVersion: 2,
			MediaType:     manifest.DockerV2Schema2MediaType,
			Config: distributionDescriptor{
				MediaType: manifest.DockerV2Schema2ConfigMediaType,
				Size:      int64(len(s.configBytes)),
				Digest:    s.configDigest,
			},
			Layers: []distributionDescriptor{},
		}
		for _, diffID := range s.orderedDiffIDList {
			li, ok := s.knownLayers[diffID]
			if !ok {
				return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
			}
			m.Layers = append(m.Layers, distributionDescriptor{
				Digest:    digest.Digest(diffID), // diffID is a digest of the uncompressed tarball
				MediaType: manifest.DockerV2Schema2LayerMediaType,
				Size:      li.size,
			})
		}
		manifestBytes, err := json.Marshal(&m)
		if err != nil {
			return nil, "", err
		}
		s.generatedManifest = manifestBytes
	}
	return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
}
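To make the synthesized output concrete: for a single-layer image the marshaled manifest comes out roughly as below. This is an assumption-laden sketch; the exact field casing depends on the struct tags in this package's types file (not shown here), the digests and sizes are placeholders, and the media-type strings are the well-known Docker schema2 values the constants above refer to:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Approximate shape of the manifest GetManifest() generates (values illustrative).
	generated := []byte(`{
	  "schemaVersion": 2,
	  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
	  "config": {
	    "mediaType": "application/vnd.docker.container.image.v1+json",
	    "size": 1497,
	    "digest": "sha256:3e1a..."
	  },
	  "layers": [{
	    "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
	    "size": 1312,
	    "digest": "sha256:abcd..."
	  }]
	}`)
	var m map[string]interface{}
	if err := json.Unmarshal(generated, &m); err != nil {
		panic(err)
	}
	fmt.Println("schemaVersion:", m["schemaVersion"], "layers:", len(m["layers"].([]interface{})))
}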
// GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest
// out of a manifest list.
func (s *Source) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
	// How did we even get here? GetManifest() above has returned a manifest.DockerV2Schema2MediaType.
	return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
}
type readCloseWrapper struct {
	io.Reader
	closeFunc func() error
}

func (r readCloseWrapper) Close() error {
	if r.closeFunc != nil {
		return r.closeFunc()
	}
	return nil
}
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
func (s *Source) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
	if err := s.ensureCachedDataIsPresent(); err != nil {
		return nil, 0, err
	}

	if info.Digest == s.configDigest { // FIXME? Implement more general digest matching instead of assuming sha256.
		return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
	}

	if li, ok := s.knownLayers[diffID(info.Digest)]; ok { // diffID is a digest of the uncompressed tarball.
		stream, err := s.openTarComponent(li.path)
		if err != nil {
			return nil, 0, err
		}

		// In order to handle the fact that digests != diffIDs (and thus that a
		// caller which is trying to verify the blob will run into problems),
		// we need to decompress blobs. This is a bit ugly, but it's a
		// consequence of making everything addressable by their DiffID rather
		// than by their digest...
		//
		// In particular, because the v2s2 manifest being generated uses
		// DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of
		// layers, not their _actual_ digest. The result is that copy/... will
		// be verifying a "digest" which is not the actual layer's digest (but
		// is instead the DiffID).

		decompressFunc, reader, err := compression.DetectCompression(stream)
		if err != nil {
			return nil, 0, errors.Wrapf(err, "Detecting compression in blob %s", info.Digest)
		}

		if decompressFunc != nil {
			reader, err = decompressFunc(reader)
			if err != nil {
				return nil, 0, errors.Wrapf(err, "Decompressing blob %s stream", info.Digest)
			}
		}

		newStream := readCloseWrapper{
			Reader:    reader,
			closeFunc: stream.Close,
		}

		return newStream, li.size, nil
	}

	return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
}
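The digest-versus-diffID distinction above is easy to demonstrate: compressing a layer changes its sha256, so only the decompressed stream hashes to the DiffID recorded in the image config. A small self-contained sketch using the standard library plus go-digest (which this file already imports); the payload is obviously a stand-in for a real layer tarball:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"

	"github.com/opencontainers/go-digest"
)

func main() {
	layer := []byte("pretend this is an uncompressed layer.tar")
	diffID := digest.FromBytes(layer) // what the image config records

	// What a registry would typically store: the gzip-compressed layer.
	var compressed bytes.Buffer
	gz := gzip.NewWriter(&compressed)
	if _, err := gz.Write(layer); err != nil {
		panic(err)
	}
	if err := gz.Close(); err != nil {
		panic(err)
	}
	blobDigest := digest.FromBytes(compressed.Bytes())

	fmt.Println("diffID != blob digest:", diffID != blobDigest) // true

	// Decompressing first (as GetBlob does) makes verification against the diffID succeed.
	gr, err := gzip.NewReader(bytes.NewReader(compressed.Bytes()))
	if err != nil {
		panic(err)
	}
	decompressed, err := ioutil.ReadAll(gr)
	if err != nil {
		panic(err)
	}
	fmt.Println("verified:", digest.FromBytes(decompressed) == diffID) // true
}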
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
func (s *Source) GetSignatures() ([][]byte, error) {
	return [][]byte{}, nil
}
@@ -1,4 +1,4 @@
-package daemon
+package tarfile
 
 import "github.com/opencontainers/go-digest"
 
@@ -43,7 +43,7 @@ type schema2Manifest struct {
 
 // Based on github.com/docker/docker/image/image.go
 // MOST CONTENT OMITTED AS UNNECESSARY
-type dockerImage struct {
+type image struct {
 	RootFS *rootFS `json:"rootfs,omitempty"`
 }
2
vendor/github.com/containers/image/image/manifest.go
generated
vendored
@@ -4,8 +4,8 @@ import (
 	"time"
 
 	"github.com/containers/image/manifest"
+	"github.com/containers/image/pkg/strslice"
 	"github.com/containers/image/types"
-	"github.com/docker/docker/api/types/strslice"
 	"github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 )
1
vendor/github.com/containers/image/pkg/strslice/README.md
generated
vendored
Normal file
@@ -0,0 +1 @@
This package was replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice).
30
vendor/github.com/containers/image/pkg/strslice/strslice.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
package strslice

import "encoding/json"

// StrSlice represents a string or an array of strings.
// We need to override the json decoder to accept both options.
type StrSlice []string

// UnmarshalJSON decodes the byte slice whether it's a string or an array of
// strings. This method is needed to implement json.Unmarshaler.
func (e *StrSlice) UnmarshalJSON(b []byte) error {
	if len(b) == 0 {
		// With no input, we preserve the existing value by returning nil and
		// leaving the target alone. This allows defining default values for
		// the type.
		return nil
	}

	p := make([]string, 0, 1)
	if err := json.Unmarshal(b, &p); err != nil {
		var s string
		if err := json.Unmarshal(b, &s); err != nil {
			return err
		}
		p = append(p, s)
	}

	*e = p
	return nil
}
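StrSlice exists because Docker image configs serialize fields such as Cmd and Entrypoint either as a bare string (shell form) or as an array (exec form). A quick sketch of both decode paths; the config struct here is illustrative, not part of the package:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/image/pkg/strslice"
)

type config struct {
	Cmd strslice.StrSlice `json:"Cmd"`
}

func main() {
	for _, raw := range []string{
		`{"Cmd": "/bin/sh -c true"}`,         // shell form: a bare string
		`{"Cmd": ["/bin/sh", "-c", "true"]}`, // exec form: an array
	} {
		var c config
		if err := json.Unmarshal([]byte(raw), &c); err != nil {
			panic(err)
		}
		fmt.Printf("%q\n", []string(c.Cmd)) // both decode to a []string
	}
}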
1
vendor/github.com/containers/image/transports/alltransports/alltransports.go
generated
vendored
@@ -8,6 +8,7 @@ import (
 	// a transport.
 	_ "github.com/containers/image/directory"
 	_ "github.com/containers/image/docker"
+	_ "github.com/containers/image/docker/archive"
 	_ "github.com/containers/image/docker/daemon"
 	_ "github.com/containers/image/oci/layout"
 	_ "github.com/containers/image/openshift"
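The added blank import registers the docker-archive transport with the generic parser, which is what makes docker-archive: reference strings resolvable. A hedged sketch using alltransports.ParseImageName; the reference syntax (a path, then an optional name:tag) and the tarball path are illustrative:

package main

import (
	"fmt"

	"github.com/containers/image/transports/alltransports"
)

func main() {
	// Parse a docker-archive reference; the registered transport handles the prefix.
	ref, err := alltransports.ParseImageName("docker-archive:/tmp/busybox.tar:busybox:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println("transport:", ref.Transport().Name()) // "docker-archive"
}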
31
vendor/github.com/containers/image/vendor.conf
generated
vendored
Normal file
@@ -0,0 +1,31 @@
github.com/Sirupsen/logrus 7f4b1adc791766938c29457bed0703fb9134421a
github.com/containers/storage 5cbbc6bafb45bd7ef10486b673deb3b81bb3b787
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
github.com/docker/distribution df5327f76fb6468b84a87771e361762b8be23fdb
github.com/docker/docker 75843d36aa5c3eaade50da005f9e0ff2602f3d5e
github.com/docker/go-connections 7da10c8c50cad14494ec818dcdfb6506265c0086
github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
github.com/docker/libtrust aabc10ec26b754e797f9028f4589c5b7bd90dc20
github.com/ghodss/yaml 04f313413ffd65ce25f2541bfd2b2ceec5c0908c
github.com/gorilla/context 08b5f424b9271eedf6f9f0ce86cb9396ed337a42
github.com/gorilla/mux 94e7d24fd285520f3d12ae998f7fdd6b5393d453
github.com/imdario/mergo 50d4dbd4eb0e84778abe37cefef140271d96fade
github.com/mattn/go-runewidth 14207d285c6c197daabb5c9793d63e7af9ab2d50
github.com/mattn/go-shellwords 005a0944d84452842197c2108bd9168ced206f78
github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/mtrmac/gpgme b2432428689ca58c2b8e8dea9449d3295cf96fc9
github.com/opencontainers/go-digest aa2ec055abd10d26d539eb630a92241b781ce4bc
github.com/opencontainers/image-spec v1.0.0-rc4
github.com/opencontainers/runc 6b1d0e76f239ffb435445e5ae316d2676c07c6e3
github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
github.com/pkg/errors 248dadf4e9068a0b3e79f02ed0a610d935de5302
github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2
github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987
github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721
golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8
golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe
golang.org/x/sys 075e574b89e4c2d22f2286a7e2b919519c6f3547
gopkg.in/cheggaaa/pb.v1 d7e6ca3010b6f084d8056847f55d7f572f180678
gopkg.in/yaml.v2 a3f3340b5840cee44f372bddb5880fcbc419b46a
k8s.io/client-go bcde30fb7eaed76fd98a36b4120321b94995ffb6
github.com/xeipuuv/gojsonschema master