Mirror of https://github.com/containers/skopeo.git (synced 2025-06-01 11:15:36 +00:00)
[release-1.11] CVE-2024-3727
Addresses CVE-2024-3727 by bumping c/common to v0.51.4 and c/image to v5.24.3

Fixes: https://issues.redhat.com/browse/OCPBUGS-37020
       https://issues.redhat.com/browse/OCPBUGS-37022
       https://issues.redhat.com/browse/OCPBUGS-37023

Signed-off-by: tomsweeneyredhat <tsweeney@redhat.com>
Parent commit: 78dc389125
This commit:   7f996f3bdb
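The recurring pattern in the vendored c/image changes below is to call Validate() on a digest before its Encoded(), Hex(), or String() value is interpolated into a filesystem path, registry URL, or tag: those accessors panic on malformed input, and an unvalidated digest could otherwise smuggle path elements such as ../ into the result. A minimal standalone sketch of that pattern using github.com/opencontainers/go-digest follows; the layerPath helper and the /var/lib/blobs directory are hypothetical, for illustration only, and this is not skopeo's or c/image's actual code.

package main

import (
    "fmt"
    "path/filepath"

    "github.com/opencontainers/go-digest"
)

// layerPath mirrors the validate-before-use pattern applied in the bumped
// c/image code: reject a malformed digest before Encoded() is called, since
// Encoded() panics on invalid input and an unvalidated value could otherwise
// end up inside a filesystem path.
func layerPath(dir string, d digest.Digest) (string, error) {
    if err := d.Validate(); err != nil {
        return "", err
    }
    return filepath.Join(dir, d.Encoded()), nil
}

func main() {
    good := digest.FromString("example blob")
    p, err := layerPath("/var/lib/blobs", good)
    fmt.Println(p, err)

    // A value that is not a well-formed digest is rejected instead of
    // being interpolated into the path.
    _, err = layerPath("/var/lib/blobs", digest.Digest("../../etc/passwd"))
    fmt.Println(err)
}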
go.mod (4 changed lines)

@@ -3,8 +3,8 @@ module github.com/containers/skopeo
 go 1.17
 
 require (
-    github.com/containers/common v0.51.0
-    github.com/containers/image/v5 v5.24.1
+    github.com/containers/common v0.51.4
+    github.com/containers/image/v5 v5.24.3
     github.com/containers/ocicrypt v1.1.10
     github.com/containers/storage v1.45.3
     github.com/docker/distribution v2.8.1+incompatible
go.sum (8 changed lines)

@@ -212,10 +212,10 @@ github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ…
 github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
 github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
 github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
-github.com/containers/common v0.51.0 h1:Ax4YHNTG8cEPHZJcMYRoP7sfBgOISceeyOvmZzmSucg=
-github.com/containers/common v0.51.0/go.mod h1:3W2WIdalgQfrsX/T5tjX+6CxgT3ThJVN2G9sNuFjuCM=
-github.com/containers/image/v5 v5.24.1 h1:XaRw3FJmvZtI297uBVTJluUVH4AQJ//YpHviaOw0C4M=
-github.com/containers/image/v5 v5.24.1/go.mod h1:oss5F6ssGQz8ZtC79oY+fuzYA3m3zBek9tq9gmhuvHc=
+github.com/containers/common v0.51.4 h1:1m3D9lPYgY7sS4Xod962rCEZTsOlR2nuAbYFhzopME4=
+github.com/containers/common v0.51.4/go.mod h1:CVSTmQWOs6IbjOZW7ik+7QggrOR3gzKc6gqYfRipl1c=
+github.com/containers/image/v5 v5.24.3 h1:IKrt9qWFqLkvu7trjH5XOKjkCdJ3y5vdcriOcm7j3GM=
+github.com/containers/image/v5 v5.24.3/go.mod h1:oss5F6ssGQz8ZtC79oY+fuzYA3m3zBek9tq9gmhuvHc=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
vendor/github.com/containers/common/pkg/auth/auth.go (generated, vendored; 24 changed lines)

@@ -19,6 +19,23 @@ import (
     terminal "golang.org/x/term"
 )
 
+// ErrNewCredentialsInvalid means that the new user-provided credentials are
+// not accepted by the registry.
+type ErrNewCredentialsInvalid struct {
+    underlyingError error
+    message         string
+}
+
+// Error returns the error message as a string.
+func (e ErrNewCredentialsInvalid) Error() string {
+    return e.message
+}
+
+// Unwrap returns the underlying error.
+func (e ErrNewCredentialsInvalid) Unwrap() error {
+    return e.underlyingError
+}
+
 // GetDefaultAuthFile returns env value REGISTRY_AUTH_FILE as default
 // --authfile path used in multiple --authfile flag definitions
 // Will fail over to DOCKER_CONFIG if REGISTRY_AUTH_FILE environment is not set

@@ -143,6 +160,7 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO…
     }
 
     if err = docker.CheckAuth(ctx, systemContext, username, password, registry); err == nil {
+        if !opts.NoWriteBack {
             // Write the new credentials to the authfile
             desc, err := config.SetCredentials(systemContext, key, username, password)
             if err != nil {

@@ -152,13 +170,15 @@ func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginO…
                 fmt.Fprintln(opts.Stdout, "Used: ", desc)
             }
         }
-    if err == nil {
         fmt.Fprintln(opts.Stdout, "Login Succeeded!")
         return nil
     }
     if unauthorized, ok := err.(docker.ErrUnauthorizedForCredentials); ok {
         logrus.Debugf("error logging into %q: %v", key, unauthorized)
-        return fmt.Errorf("logging into %q: invalid username/password", key)
+        return ErrNewCredentialsInvalid{
+            underlyingError: err,
+            message:         fmt.Sprintf("logging into %q: invalid username/password", key),
+        }
     }
     return fmt.Errorf("authenticating creds for %q: %w", key, err)
 }
vendor/github.com/containers/common/pkg/auth/cli.go (generated, vendored; 1 changed line)

@@ -26,6 +26,7 @@ type LoginOptions struct {
     Stdin                     io.Reader // set to os.Stdin
     Stdout                    io.Writer // set to os.Stdout
     AcceptUnspecifiedRegistry bool      // set to true if allows login with unspecified registry
+    NoWriteBack               bool      // set to true to not write the credentials to the authfile/cred helpers
 }
 
 // LogoutOptions represents the results for flags in logout
vendor/github.com/containers/image/v5/copy/copy.go (generated, vendored; 39 changed lines)

@@ -1085,7 +1085,10 @@ func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error {
         destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
             progressPool := ic.c.newProgressPool()
             defer progressPool.Wait()
-            bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
+            bar, err := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
+            if err != nil {
+                return types.BlobInfo{}, err
+            }
             defer bar.Abort(false)
             ic.c.printCopyInfo("config", srcInfo)
 

@@ -1177,11 +1180,17 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to…
     }
     if reused {
         logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
-        func() { // A scope for defer
-            bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: blobInfo.Digest, Size: 0}, "blob", "skipped: already exists")
+        if err := func() error { // A scope for defer
+            bar, err := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: blobInfo.Digest, Size: 0}, "blob", "skipped: already exists")
+            if err != nil {
+                return err
+            }
             defer bar.Abort(false)
             bar.mark100PercentComplete()
-        }()
+            return nil
+        }(); err != nil {
+            return types.BlobInfo{}, "", err
+        }
 
         // Throw an event that the layer has been skipped
         if ic.c.progress != nil && ic.c.progressInterval > 0 {

@@ -1212,8 +1221,11 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to…
     // Attempt a partial only when the source allows to retrieve a blob partially and
     // the destination has support for it.
     if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() {
-        if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer
-            bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
+        reused, blobInfo, err := func() (bool, types.BlobInfo, error) { // A scope for defer
+            bar, err := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
+            if err != nil {
+                return false, types.BlobInfo{}, err
+            }
             hideProgressBar := true
             defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily.
                 bar.Abort(hideProgressBar)

@@ -1231,18 +1243,25 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to…
                 bar.mark100PercentComplete()
                 hideProgressBar = false
                 logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
-                return true, info
+                return true, info, nil
             }
             logrus.Debugf("Failed to retrieve partial blob: %v", err)
-            return false, types.BlobInfo{}
-        }(); reused {
+            return false, types.BlobInfo{}, nil
+        }()
+        if err != nil {
+            return types.BlobInfo{}, "", err
+        }
+        if reused {
             return blobInfo, cachedDiffID, nil
         }
     }
 
     // Fallback: copy the layer, computing the diffID if we need to do so
     return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer
-        bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
+        bar, err := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
+        if err != nil {
+            return types.BlobInfo{}, "", err
+        }
         defer bar.Abort(false)
 
         srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
vendor/github.com/containers/image/v5/copy/progress_bars.go (generated, vendored; 7 changed lines)

@@ -48,10 +48,13 @@ type progressBar struct {
 // As a convention, most users of progress bars should call mark100PercentComplete on full success;
 // by convention, we don't leave progress bars in partial state when fully done
 // (even if we copied much less data than anticipated).
-func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) *progressBar {
+func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.BlobInfo, kind string, onComplete string) (*progressBar, error) {
     // shortDigestLen is the length of the digest used for blobs.
     const shortDigestLen = 12
 
+    if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+        return nil, err
+    }
     prefix := fmt.Sprintf("Copying %s %s", kind, info.Digest.Encoded())
     // Truncate the prefix (chopping of some part of the digest) to make all progress bars aligned in a column.
     maxPrefixLen := len("Copying blob ") + shortDigestLen

@@ -99,7 +102,7 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types.…
     return &progressBar{
         Bar:          bar,
         originalSize: info.Size,
-    }
+    }, nil
 }
 
 // printCopyInfo prints a "Copying ..." message on the copier if the output is
vendor/github.com/containers/image/v5/directory/directory_dest.go (generated, vendored; 22 changed lines)

@@ -173,7 +173,10 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.…
         }
     }
 
-    blobPath := d.ref.layerPath(blobDigest)
+    blobPath, err := d.ref.layerPath(blobDigest)
+    if err != nil {
+        return types.BlobInfo{}, err
+    }
     // need to explicitly close the file, since a rename won't otherwise not work on Windows
     blobFile.Close()
     explicitClosed = true

@@ -195,7 +198,10 @@ func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, inf…
     if info.Digest == "" {
         return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with unknown digest")
     }
-    blobPath := d.ref.layerPath(info.Digest)
+    blobPath, err := d.ref.layerPath(info.Digest)
+    if err != nil {
+        return false, types.BlobInfo{}, err
+    }
     finfo, err := os.Stat(blobPath)
     if err != nil && os.IsNotExist(err) {
         return false, types.BlobInfo{}, nil

@@ -215,7 +221,11 @@
 // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
 // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
 func (d *dirImageDestination) PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error {
-    return os.WriteFile(d.ref.manifestPath(instanceDigest), manifest, 0644)
+    path, err := d.ref.manifestPath(instanceDigest)
+    if err != nil {
+        return err
+    }
+    return os.WriteFile(path, manifest, 0644)
 }
 
 // PutSignaturesWithFormat writes a set of signatures to the destination.

@@ -228,7 +238,11 @@ func (d *dirImageDestination) PutSignaturesWithFormat(ctx context.Context, signa…
         if err != nil {
             return err
         }
-        if err := os.WriteFile(d.ref.signaturePath(i, instanceDigest), blob, 0644); err != nil {
+        path, err := d.ref.signaturePath(i, instanceDigest)
+        if err != nil {
+            return err
+        }
+        if err := os.WriteFile(path, blob, 0644); err != nil {
             return err
         }
     }
vendor/github.com/containers/image/v5/directory/directory_src.go (generated, vendored; 17 changed lines)

@@ -55,7 +55,11 @@ func (s *dirImageSource) Close() error {
 // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
 // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
 func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
-    m, err := os.ReadFile(s.ref.manifestPath(instanceDigest))
+    path, err := s.ref.manifestPath(instanceDigest)
+    if err != nil {
+        return nil, "", err
+    }
+    m, err := os.ReadFile(path)
     if err != nil {
         return nil, "", err
     }

@@ -66,7 +70,11 @@ func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest…
 // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
 // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
 func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
-    r, err := os.Open(s.ref.layerPath(info.Digest))
+    path, err := s.ref.layerPath(info.Digest)
+    if err != nil {
+        return nil, -1, err
+    }
+    r, err := os.Open(path)
     if err != nil {
         return nil, -1, err
     }

@@ -84,7 +92,10 @@ func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache…
 func (s *dirImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
     signatures := []signature.Signature{}
     for i := 0; ; i++ {
-        path := s.ref.signaturePath(i, instanceDigest)
+        path, err := s.ref.signaturePath(i, instanceDigest)
+        if err != nil {
+            return nil, err
+        }
         sigBlob, err := os.ReadFile(path)
         if err != nil {
             if os.IsNotExist(err) {
vendor/github.com/containers/image/v5/directory/directory_transport.go (generated, vendored; 25 changed lines)

@@ -161,25 +161,34 @@ func (ref dirReference) DeleteImage(ctx context.Context, sys *types.SystemContex…
 }
 
 // manifestPath returns a path for the manifest within a directory using our conventions.
-func (ref dirReference) manifestPath(instanceDigest *digest.Digest) string {
+func (ref dirReference) manifestPath(instanceDigest *digest.Digest) (string, error) {
     if instanceDigest != nil {
-        return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json")
+        if err := instanceDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
+            return "", err
+        }
+        return filepath.Join(ref.path, instanceDigest.Encoded()+".manifest.json"), nil
     }
-    return filepath.Join(ref.path, "manifest.json")
+    return filepath.Join(ref.path, "manifest.json"), nil
 }
 
 // layerPath returns a path for a layer tarball within a directory using our conventions.
-func (ref dirReference) layerPath(digest digest.Digest) string {
+func (ref dirReference) layerPath(digest digest.Digest) (string, error) {
+    if err := digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
+        return "", err
+    }
     // FIXME: Should we keep the digest identification?
-    return filepath.Join(ref.path, digest.Encoded())
+    return filepath.Join(ref.path, digest.Encoded()), nil
 }
 
 // signaturePath returns a path for a signature within a directory using our conventions.
-func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) string {
+func (ref dirReference) signaturePath(index int, instanceDigest *digest.Digest) (string, error) {
     if instanceDigest != nil {
-        return filepath.Join(ref.path, fmt.Sprintf(instanceDigest.Encoded()+".signature-%d", index+1))
+        if err := instanceDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly.
+            return "", err
+        }
+        return filepath.Join(ref.path, fmt.Sprintf(instanceDigest.Encoded()+".signature-%d", index+1)), nil
     }
-    return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1))
+    return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1)), nil
 }
 
 // versionPath returns a path for the version file within a directory using our conventions.
vendor/github.com/containers/image/v5/docker/body_reader.go (generated, vendored; 71 changed lines)

@@ -17,21 +17,26 @@ import (
     "github.com/sirupsen/logrus"
 )
 
-// bodyReaderMinimumProgress is the minimum progress we want to see before we retry
-const bodyReaderMinimumProgress = 1 * 1024 * 1024
+const (
+    // bodyReaderMinimumProgress is the minimum progress we consider a good reason to retry
+    bodyReaderMinimumProgress = 1 * 1024 * 1024
+    // bodyReaderMSSinceLastRetry is the minimum time since a last retry we consider a good reason to retry
+    bodyReaderMSSinceLastRetry = 60 * 1_000
+)
 
 // bodyReader is an io.ReadCloser returned by dockerImageSource.GetBlob,
 // which can transparently resume some (very limited) kinds of aborted connections.
 type bodyReader struct {
     ctx context.Context
     c   *dockerClient
 
     path   string   // path to pass to makeRequest to retry
     logURL *url.URL // a string to use in error messages
-    body                io.ReadCloser // The currently open connection we use to read data, or nil if there is nothing to read from / close.
-    lastRetryOffset     int64
-    offset              int64 // Current offset within the blob
     firstConnectionTime time.Time
 
+    body            io.ReadCloser // The currently open connection we use to read data, or nil if there is nothing to read from / close.
+    lastRetryOffset int64         // -1 if N/A
+    lastRetryTime   time.Time     // time.Time{} if N/A
+    offset          int64         // Current offset within the blob
     lastSuccessTime time.Time // time.Time{} if N/A
 }

@@ -46,13 +51,15 @@ func newBodyReader(ctx context.Context, c *dockerClient, path string, firstBody…
     res := &bodyReader{
         ctx: ctx,
         c:   c,
 
         path:   path,
         logURL: logURL,
-        body:                firstBody,
-        lastRetryOffset:     0,
-        offset:              0,
         firstConnectionTime: time.Now(),
 
+        body:            firstBody,
+        lastRetryOffset: -1,
+        lastRetryTime:   time.Time{},
+        offset:          0,
+        lastSuccessTime: time.Time{},
     }
     return res, nil
 }

@@ -190,6 +197,7 @@ func (br *bodyReader) Read(p []byte) (int, error) {
             consumedBody = true
             br.body = res.Body
             br.lastRetryOffset = br.offset
+            br.lastRetryTime = time.Time{}
             return n, nil
 
         default:

@@ -198,29 +206,40 @@ func (br *bodyReader) Read(p []byte) (int, error) {
     }
 }
 
-// millisecondsSince is like time.Since(tm).Milliseconds, but it returns a floating-point value
-func millisecondsSince(tm time.Time) float64 {
-    return float64(time.Since(tm).Nanoseconds()) / 1_000_000.0
+// millisecondsSinceOptional is like currentTime.Sub(tm).Milliseconds, but it returns a floating-point value.
+// If tm is time.Time{}, it returns math.NaN()
+func millisecondsSinceOptional(currentTime time.Time, tm time.Time) float64 {
+    if tm == (time.Time{}) {
+        return math.NaN()
+    }
+    return float64(currentTime.Sub(tm).Nanoseconds()) / 1_000_000.0
 }
 
 // errorIfNotReconnecting makes a heuristic decision whether we should reconnect after err at redactedURL; if so, it returns nil,
 // otherwise it returns an appropriate error to return to the caller (possibly augmented with data about the heuristic)
 func (br *bodyReader) errorIfNotReconnecting(originalErr error, redactedURL string) error {
-    totalTime := millisecondsSince(br.firstConnectionTime)
-    failureTime := math.NaN()
-    if (br.lastSuccessTime != time.Time{}) {
-        failureTime = millisecondsSince(br.lastSuccessTime)
-    }
-    logrus.Debugf("Reading blob body from %s failed (%#v), decision inputs: lastRetryOffset %d, offset %d, %.3f ms since first connection, %.3f ms since last progress",
-        redactedURL, originalErr, br.lastRetryOffset, br.offset, totalTime, failureTime)
+    currentTime := time.Now()
+    msSinceFirstConnection := millisecondsSinceOptional(currentTime, br.firstConnectionTime)
+    msSinceLastRetry := millisecondsSinceOptional(currentTime, br.lastRetryTime)
+    msSinceLastSuccess := millisecondsSinceOptional(currentTime, br.lastSuccessTime)
+    logrus.Debugf("Reading blob body from %s failed (%#v), decision inputs: total %d @%.3f ms, last retry %d @%.3f ms, last progress @%.3f ms",
+        redactedURL, originalErr, br.offset, msSinceFirstConnection, br.lastRetryOffset, msSinceLastRetry, msSinceLastSuccess)
     progress := br.offset - br.lastRetryOffset
-    if progress < bodyReaderMinimumProgress {
-        logrus.Debugf("Not reconnecting to %s because only %d bytes progress made", redactedURL, progress)
-        return fmt.Errorf("(heuristic tuning data: last retry %d, current offset %d; %.3f ms total, %.3f ms since progress): %w",
-            br.lastRetryOffset, br.offset, totalTime, failureTime, originalErr)
-    }
-    logrus.Infof("Reading blob body from %s failed (%v), reconnecting…", redactedURL, originalErr)
+    if progress >= bodyReaderMinimumProgress {
+        logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %d bytes…", redactedURL, originalErr, progress)
         return nil
+    }
+    if br.lastRetryTime == (time.Time{}) {
+        logrus.Infof("Reading blob body from %s failed (%v), reconnecting (first reconnection)…", redactedURL, originalErr)
+        return nil
+    }
+    if msSinceLastRetry >= bodyReaderMSSinceLastRetry {
+        logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %.3f ms…", redactedURL, originalErr, msSinceLastRetry)
+        return nil
+    }
+    logrus.Debugf("Not reconnecting to %s: insufficient progress %d / time since last retry %.3f ms", redactedURL, progress, msSinceLastRetry)
+    return fmt.Errorf("(heuristic tuning data: total %d @%.3f ms, last retry %d @%.3f ms, last progress @ %.3f ms): %w",
+        br.offset, msSinceFirstConnection, br.lastRetryOffset, msSinceLastRetry, msSinceLastSuccess, originalErr)
 }
 
 // Close implements io.ReadCloser
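As a side note on the body_reader.go hunk above: the reworked heuristic reconnects when any one of three conditions holds, namely enough bytes read since the last retry, no retry attempted yet, or enough time elapsed since the last retry. Below is a compact sketch of just that decision with the same thresholds as the diff; the shouldReconnect helper is hypothetical and omits the logging and error wrapping the vendored code performs.

package main

import (
    "fmt"
    "time"
)

const (
    minimumProgressBytes = 1 * 1024 * 1024  // reconnect if at least 1 MiB was read since the last retry
    minimumRetryInterval = 60 * time.Second // or if at least 60s have passed since the last retry
)

// shouldReconnect mirrors the decision order in the updated errorIfNotReconnecting:
// enough progress, or no retry yet, or enough time elapsed since the last one.
func shouldReconnect(progressBytes int64, lastRetry time.Time, now time.Time) bool {
    if progressBytes >= minimumProgressBytes {
        return true
    }
    if lastRetry.IsZero() { // first reconnection attempt
        return true
    }
    return now.Sub(lastRetry) >= minimumRetryInterval
}

func main() {
    now := time.Now()
    fmt.Println(shouldReconnect(2<<20, now.Add(-5*time.Second), now)) // true: enough progress
    fmt.Println(shouldReconnect(100, time.Time{}, now))               // true: first retry
    fmt.Println(shouldReconnect(100, now.Add(-10*time.Second), now))  // false: too little progress, too soon
}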
vendor/github.com/containers/image/v5/docker/docker_client.go (generated, vendored; 20 changed lines)

@@ -881,6 +881,8 @@ func (c *dockerClient) detectProperties(ctx context.Context) error {
     return c.detectPropertiesError
 }
 
+// fetchManifest fetches a manifest for (the repo of ref) + tagOrDigest.
+// The caller is responsible for ensuring tagOrDigest uses the expected format.
 func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, tagOrDigest string) ([]byte, string, error) {
     path := fmt.Sprintf(manifestPath, reference.Path(ref.ref), tagOrDigest)
     headers := map[string][]string{

@@ -963,6 +965,9 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty…
         }
     }
 
+    if err := info.Digest.Validate(); err != nil { // Make sure info.Digest.String() does not contain any unexpected characters
+        return nil, 0, err
+    }
     path := fmt.Sprintf(blobsPath, reference.Path(ref.ref), info.Digest.String())
     logrus.Debugf("Downloading %s", path)
     res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)

@@ -1025,7 +1030,10 @@ func isManifestUnknownError(err error) bool {
 // digest in ref.
 // It returns (nil, nil) if the manifest does not exist.
 func (c *dockerClient) getSigstoreAttachmentManifest(ctx context.Context, ref dockerReference, digest digest.Digest) (*manifest.OCI1, error) {
-    tag := sigstoreAttachmentTag(digest)
+    tag, err := sigstoreAttachmentTag(digest)
+    if err != nil {
+        return nil, err
+    }
     sigstoreRef, err := reference.WithTag(reference.TrimNamed(ref.ref), tag)
     if err != nil {
         return nil, err

@@ -1058,6 +1066,9 @@ func (c *dockerClient) getSigstoreAttachmentManifest(ctx context.Context, ref do…
 // getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
 // using the original data structures.
 func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
+    if err := manifestDigest.Validate(); err != nil { // Make sure manifestDigest.String() does not contain any unexpected characters
+        return nil, err
+    }
     path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
     res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil)
     if err != nil {

@@ -1081,6 +1092,9 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe…
 }
 
 // sigstoreAttachmentTag returns a sigstore attachment tag for the specified digest.
-func sigstoreAttachmentTag(d digest.Digest) string {
-    return strings.Replace(d.String(), ":", "-", 1) + ".sig"
+func sigstoreAttachmentTag(d digest.Digest) (string, error) {
+    if err := d.Validate(); err != nil { // Make sure d.String() doesn’t contain any unexpected characters
+        return "", err
+    }
+    return strings.Replace(d.String(), ":", "-", 1) + ".sig", nil
 }
vendor/github.com/containers/image/v5/docker/docker_image.go (generated, vendored; 16 changed lines)

@@ -14,6 +14,7 @@ import (
     "github.com/containers/image/v5/manifest"
     "github.com/containers/image/v5/types"
     "github.com/opencontainers/go-digest"
+    "github.com/sirupsen/logrus"
 )
 
 // Image is a Docker-specific implementation of types.ImageCloser with a few extra methods

@@ -87,7 +88,20 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.…
         if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil {
             return nil, err
         }
-        tags = append(tags, tagsHolder.Tags...)
+        for _, tag := range tagsHolder.Tags {
+            if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values
+                // Per https://github.com/containers/skopeo/issues/2346 , unknown versions of JFrog Artifactory,
+                // contrary to the tag format specified in
+                // https://github.com/opencontainers/distribution-spec/blob/8a871c8234977df058f1a14e299fe0a673853da2/spec.md?plain=1#L160 ,
+                // include digests in the list.
+                if _, err := digest.Parse(tag); err == nil {
+                    logrus.Debugf("Ignoring invalid tag %q matching a digest format", tag)
+                    continue
+                }
+                return nil, fmt.Errorf("registry returned invalid tag %q: %w", tag, err)
+            }
+            tags = append(tags, tag)
+        }
 
         link := res.Header.Get("Link")
         if link == "" {
vendor/github.com/containers/image/v5/docker/docker_image_dest.go (generated, vendored; 22 changed lines)

@@ -226,6 +226,9 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream…
 // If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil);
 // it returns a non-nil error only on an unexpected failure.
 func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) {
+    if err := digest.Validate(); err != nil { // Make sure digest.String() does not contain any unexpected characters
+        return false, -1, err
+    }
     checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
     logrus.Debugf("Checking %s", checkPath)
     res, err := d.c.makeRequest(ctx, http.MethodHead, checkPath, nil, nil, v2Auth, extraScope)

@@ -414,6 +417,7 @@ func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, inst…
         // particular instance.
         refTail = instanceDigest.String()
         // Double-check that the manifest we've been given matches the digest we've been given.
+        // This also validates the format of instanceDigest.
         matches, err := manifest.MatchesDigest(m, *instanceDigest)
         if err != nil {
             return fmt.Errorf("digesting manifest in PutManifest: %w", err)

@@ -580,11 +584,13 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature…
 
     // NOTE: Keep this in sync with docs/signature-protocols.md!
     for i, signature := range signatures {
-        sigURL := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
-        err := d.putOneSignature(sigURL, signature)
+        sigURL, err := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
         if err != nil {
             return err
         }
+        if err := d.putOneSignature(sigURL, signature); err != nil {
+            return err
+        }
     }
     // Remove any other signatures, if present.
     // We stop at the first missing signature; if a previous deleting loop aborted

@@ -592,7 +598,10 @@ func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature…
     // is enough for dockerImageSource to stop looking for other signatures, so that
     // is sufficient.
     for i := len(signatures); ; i++ {
-        sigURL := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
+        sigURL, err := lookasideStorageURL(d.c.signatureBase, manifestDigest, i)
+        if err != nil {
+            return err
+        }
         missing, err := d.c.deleteOneSignature(sigURL)
         if err != nil {
             return err

@@ -719,8 +728,12 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.…
     if err != nil {
         return err
     }
+    attachmentTag, err := sigstoreAttachmentTag(manifestDigest)
+    if err != nil {
+        return err
+    }
     logrus.Debugf("Uploading sigstore attachment manifest")
-    return d.uploadManifest(ctx, manifestBlob, sigstoreAttachmentTag(manifestDigest))
+    return d.uploadManifest(ctx, manifestBlob, attachmentTag)
 }
 
 func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string,

@@ -846,6 +859,7 @@ sigExists:
         return err
     }
 
+    // manifestDigest is known to be valid because it was not rejected by getExtensionsSignatures above.
     path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String())
     res, err := d.c.makeRequest(ctx, http.MethodPut, path, nil, bytes.NewReader(body), v2Auth, nil)
     if err != nil {
vendor/github.com/containers/image/v5/docker/docker_image_src.go (generated, vendored; 18 changed lines)

@@ -188,6 +188,9 @@ func simplifyContentType(contentType string) string {
 // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
 func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
     if instanceDigest != nil {
+        if err := instanceDigest.Validate(); err != nil { // Make sure instanceDigest.String() does not contain any unexpected characters
+            return nil, "", err
+        }
         return s.fetchManifest(ctx, instanceDigest.String())
     }
     err := s.ensureManifestIsLoaded(ctx)

@@ -197,6 +200,8 @@ func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *dig…
     return s.cachedManifest, s.cachedManifestMIMEType, nil
 }
 
+// fetchManifest fetches a manifest for tagOrDigest.
+// The caller is responsible for ensuring tagOrDigest uses the expected format.
 func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
     return s.c.fetchManifest(ctx, s.physicalRef, tagOrDigest)
 }

@@ -346,6 +351,9 @@ func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo,…
         return nil, nil, fmt.Errorf("external URLs not supported with GetBlobAt")
     }
 
+    if err := info.Digest.Validate(); err != nil { // Make sure info.Digest.String() does not contain any unexpected characters
+        return nil, nil, err
+    }
     path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String())
     logrus.Debugf("Downloading %s", path)
     res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil)

@@ -456,7 +464,10 @@ func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, inst…
             return nil, fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures)
         }
 
-        sigURL := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
+        sigURL, err := lookasideStorageURL(s.c.signatureBase, manifestDigest, i)
+        if err != nil {
+            return nil, err
+        }
         signature, missing, err := s.getOneSignature(ctx, sigURL)
         if err != nil {
             return nil, err

@@ -649,7 +660,10 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere…
     }
 
     for i := 0; ; i++ {
-        sigURL := lookasideStorageURL(c.signatureBase, manifestDigest, i)
+        sigURL, err := lookasideStorageURL(c.signatureBase, manifestDigest, i)
+        if err != nil {
+            return err
+        }
         missing, err := c.deleteOneSignature(sigURL)
         if err != nil {
             return err
vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go (generated, vendored; 12 changed lines)

@@ -111,11 +111,19 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader,…
             return types.BlobInfo{}, fmt.Errorf("reading Config file stream: %w", err)
         }
         d.config = buf
-        if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
+        configPath, err := d.archive.configPath(inputInfo.Digest)
+        if err != nil {
+            return types.BlobInfo{}, err
+        }
+        if err := d.archive.sendFileLocked(configPath, inputInfo.Size, bytes.NewReader(buf)); err != nil {
             return types.BlobInfo{}, fmt.Errorf("writing Config file: %w", err)
         }
     } else {
-        if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
+        layerPath, err := d.archive.physicalLayerPath(inputInfo.Digest)
+        if err != nil {
+            return types.BlobInfo{}, err
+        }
+        if err := d.archive.sendFileLocked(layerPath, inputInfo.Size, stream); err != nil {
             return types.BlobInfo{}, err
         }
     }
34
vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go
generated
vendored
34
vendor/github.com/containers/image/v5/docker/internal/tarfile/writer.go
generated
vendored
@ -92,7 +92,10 @@ func (w *Writer) ensureSingleLegacyLayerLocked(layerID string, layerDigest diges
|
|||||||
if _, ok := w.legacyLayers[layerID]; !ok {
|
if _, ok := w.legacyLayers[layerID]; !ok {
|
||||||
// Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
|
// Create a symlink for the legacy format, where there is one subdirectory per layer ("image").
|
||||||
// See also the comment in physicalLayerPath.
|
// See also the comment in physicalLayerPath.
|
||||||
physicalLayerPath := w.physicalLayerPath(layerDigest)
|
physicalLayerPath, err := w.physicalLayerPath(layerDigest)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
|
if err := w.sendSymlinkLocked(filepath.Join(layerID, legacyLayerFileName), filepath.Join("..", physicalLayerPath)); err != nil {
|
||||||
return fmt.Errorf("creating layer symbolic link: %w", err)
|
return fmt.Errorf("creating layer symbolic link: %w", err)
|
||||||
}
|
}
|
||||||
@ -136,6 +139,9 @@ func (w *Writer) writeLegacyMetadataLocked(layerDescriptors []manifest.Schema2De
|
|||||||
}
|
}
|
||||||
|
|
||||||
// This chainID value matches the computation in docker/docker/layer.CreateChainID …
|
// This chainID value matches the computation in docker/docker/layer.CreateChainID …
|
||||||
|
if err := l.Digest.Validate(); err != nil { // This should never fail on this code path, still: make sure the chainID computation is unambiguous.
|
||||||
|
return err
|
||||||
|
}
|
||||||
if chainID == "" {
|
if chainID == "" {
|
||||||
chainID = l.Digest
|
chainID = l.Digest
|
||||||
} else {
|
} else {
|
||||||
@ -206,12 +212,20 @@ func checkManifestItemsMatch(a, b *ManifestItem) error {
|
|||||||
 func (w *Writer) ensureManifestItemLocked(layerDescriptors []manifest.Schema2Descriptor, configDigest digest.Digest, repoTags []reference.NamedTagged) error {
     layerPaths := []string{}
     for _, l := range layerDescriptors {
-        layerPaths = append(layerPaths, w.physicalLayerPath(l.Digest))
+        p, err := w.physicalLayerPath(l.Digest)
+        if err != nil {
+            return err
+        }
+        layerPaths = append(layerPaths, p)
     }
 
     var item *ManifestItem
+    configPath, err := w.configPath(configDigest)
+    if err != nil {
+        return err
+    }
     newItem := ManifestItem{
-        Config:   w.configPath(configDigest),
+        Config:   configPath,
         RepoTags: []string{},
         Layers:   layerPaths,
         Parent:   "", // We don’t have this information
@@ -296,21 +310,27 @@ func (w *Writer) Close() error {
 // configPath returns a path we choose for storing a config with the specified digest.
 // NOTE: This is an internal implementation detail, not a format property, and can change
 // any time.
-func (w *Writer) configPath(configDigest digest.Digest) string {
-    return configDigest.Hex() + ".json"
+func (w *Writer) configPath(configDigest digest.Digest) (string, error) {
+    if err := configDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
+        return "", err
+    }
+    return configDigest.Hex() + ".json", nil
 }
 
 // physicalLayerPath returns a path we choose for storing a layer with the specified digest
 // (the actual path, i.e. a regular file, not a symlink that may be used in the legacy format).
 // NOTE: This is an internal implementation detail, not a format property, and can change
 // any time.
-func (w *Writer) physicalLayerPath(layerDigest digest.Digest) string {
+func (w *Writer) physicalLayerPath(layerDigest digest.Digest) (string, error) {
+    if err := layerDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in unexpected paths, so validate explicitly.
+        return "", err
+    }
     // Note that this can't be e.g. filepath.Join(l.Digest.Hex(), legacyLayerFileName); due to the way
     // writeLegacyMetadata constructs layer IDs differently from inputinfo.Digest values (as described
     // inside it), most of the layers would end up in subdirectories alone without any metadata; (docker load)
     // tries to load every subdirectory as an image and fails if the config is missing. So, keep the layers
     // in the root of the tarball.
-    return layerDigest.Hex() + ".tar"
+    return layerDigest.Hex() + ".tar", nil
 }
 
 type tarFI struct {
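The pattern introduced in the hunks above is simple to isolate: validate a digest before deriving a tar member name from it, because an unchecked digest may lack the algorithm separator (which makes Hex() panic) or carry path-like characters that would influence where the entry lands in the archive. Below is a minimal standalone Go sketch of that pattern; layerPathFor is a hypothetical helper for illustration, not the vendored writer code.

// layerpath_sketch.go: validate-before-Hex(), as a small self-contained program.
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// layerPathFor is a hypothetical helper: derive a tar member name from a digest,
// but only after the digest has been checked to be well-formed.
func layerPathFor(d digest.Digest) (string, error) {
	if err := d.Validate(); err != nil {
		return "", err
	}
	return d.Hex() + ".tar", nil
}

func main() {
	good := digest.FromString("example layer contents")
	if p, err := layerPathFor(good); err == nil {
		fmt.Println("path:", p) // "<64 hex chars>.tar"
	}

	// A malformed digest is rejected up front instead of flowing into a file name.
	if _, err := layerPathFor(digest.Digest("sha256:../../etc/passwd")); err != nil {
		fmt.Println("rejected:", err)
	}
}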
vendor/github.com/containers/image/v5/docker/registries_d.go (7 changed lines; generated, vendored)

@@ -286,8 +286,11 @@ func (ns registryNamespace) signatureTopLevel(write bool) string {
 // lookasideStorageURL returns an URL usable for accessing signature index in base with known manifestDigest.
 // base is not nil from the caller
 // NOTE: Keep this in sync with docs/signature-protocols.md!
-func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) *url.URL {
+func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) (*url.URL, error) {
+    if err := manifestDigest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, and could possibly result in a path with ../, so validate explicitly.
+        return nil, err
+    }
     sigURL := *base
     sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
-    return &sigURL
+    return &sigURL, nil
 }
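The concern behind the lookaside change above is that the digest's hex portion is spliced directly into a URL path, so an unvalidated value such as "sha256:../../…" could walk out of the per-image signature directory. A hedged sketch of that construction follows; signatureURL and the host name are illustrative stand-ins, not the real c/image function or a real lookaside endpoint.

// lookaside_sketch.go: why the manifest digest is validated before being placed in a URL path.
package main

import (
	"fmt"
	"net/url"

	"github.com/opencontainers/go-digest"
)

// signatureURL is a hypothetical helper mirroring the validate-then-format pattern.
func signatureURL(base url.URL, manifestDigest digest.Digest, index int) (*url.URL, error) {
	if err := manifestDigest.Validate(); err != nil {
		return nil, err
	}
	u := base
	u.Path = fmt.Sprintf("%s@%s=%s/signature-%d", u.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
	return &u, nil
}

func main() {
	base := url.URL{Scheme: "https", Host: "sigstore.example.com", Path: "/sigs/library/busybox"}

	ok := digest.FromString("manifest bytes")
	if u, err := signatureURL(base, ok, 0); err == nil {
		fmt.Println(u.String())
	}

	// A digest carrying "../" never reaches the path construction.
	if _, err := signatureURL(base, digest.Digest("sha256:../../../steal"), 0); err != nil {
		fmt.Println("rejected:", err)
	}
}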
vendor/github.com/containers/image/v5/openshift/openshift_src.go (3 changed lines; generated, vendored)

@@ -108,6 +108,9 @@ func (s *openshiftImageSource) GetSignaturesWithFormat(ctx context.Context, inst
         }
         imageStreamImageName = s.imageStreamImageName
     } else {
+        if err := instanceDigest.Validate(); err != nil { // Make sure instanceDigest.String() does not contain any unexpected characters
+            return nil, err
+        }
         imageStreamImageName = instanceDigest.String()
     }
     image, err := s.client.getImage(ctx, imageStreamImageName)
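The check added above exists because digest.Digest is only a string type: String() returns whatever text the caller supplied, and that text becomes part of the image stream image name sent to the OpenShift API. A two-line illustration, assuming only the go-digest module:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	raw := digest.Digest("sha256:abc/../../evil")
	fmt.Println(raw.String())          // prints the raw, caller-controlled text unchanged
	fmt.Println(raw.Validate() != nil) // true: Validate rejects it before it is used as a name
}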
vendor/github.com/containers/image/v5/ostree/ostree_dest.go (10 changed lines; generated, vendored)

@@ -342,6 +342,10 @@ func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context,
         }
         d.repo = repo
     }
+
+    if err := info.Digest.Validate(); err != nil { // digest.Digest.Hex() panics on failure, so validate explicitly.
+        return false, private.ReusedBlob{}, err
+    }
     branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex())
 
     found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
@@ -467,12 +471,18 @@ func (d *ostreeImageDestination) Commit(context.Context, types.UnparsedImage) er
         return nil
     }
     for _, layer := range d.schema.LayersDescriptors {
+        if err := layer.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+            return err
+        }
         hash := layer.Digest.Hex()
         if err = checkLayer(hash); err != nil {
             return err
         }
     }
     for _, layer := range d.schema.FSLayers {
+        if err := layer.BlobSum.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+            return err
+        }
         hash := layer.BlobSum.Hex()
         if err = checkLayer(hash); err != nil {
             return err
vendor/github.com/containers/image/v5/ostree/ostree_src.go (4 changed lines; generated, vendored)

@@ -286,7 +286,9 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser,
 // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
 // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
 func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
+    if err := info.Digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+        return nil, -1, err
+    }
     blob := info.Digest.Hex()
 
     // Ensure s.compressed is initialized. It is build by LayerInfosForCopy.
vendor/github.com/containers/image/v5/storage/storage_dest.go (29 changed lines; generated, vendored)

@@ -311,6 +311,13 @@ func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 // tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata.
 // The caller must arrange the blob to be eventually committed using s.commitLayer().
 func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
+    if blobinfo.Digest == "" {
+        return false, types.BlobInfo{}, errors.New(`Can not check for a blob with unknown digest`)
+    }
+    if err := blobinfo.Digest.Validate(); err != nil {
+        return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
+    }
+
     // lock the entire method as it executes fairly quickly
     s.lock.Lock()
     defer s.lock.Unlock()
@@ -332,13 +339,6 @@ func (s *storageImageDestination) tryReusingBlobAsPending(ctx context.Context, b
         }
     }
 
-    if blobinfo.Digest == "" {
-        return false, types.BlobInfo{}, errors.New(`Can not check for a blob with unknown digest`)
-    }
-    if err := blobinfo.Digest.Validate(); err != nil {
-        return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
-    }
-
     // Check if we've already cached it in a file.
     if size, ok := s.fileSizes[blobinfo.Digest]; ok {
         return true, types.BlobInfo{
@@ -818,7 +818,10 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
     if err != nil {
         return fmt.Errorf("digesting top-level manifest: %w", err)
     }
-    key := manifestBigDataKey(manifestDigest)
+    key, err := manifestBigDataKey(manifestDigest)
+    if err != nil {
+        return err
+    }
     if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, toplevelManifest, manifest.Digest); err != nil {
         logrus.Debugf("error saving top-level manifest for image %q: %v", img.ID, err)
         return fmt.Errorf("saving top-level manifest for image %q: %w", img.ID, err)
@@ -827,7 +830,10 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
     // Save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store.
     // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance,
    // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers.
-    key := manifestBigDataKey(s.manifestDigest)
+    key, err := manifestBigDataKey(s.manifestDigest)
+    if err != nil {
+        return err
+    }
     if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil {
         logrus.Debugf("error saving manifest for image %q: %v", img.ID, err)
         return fmt.Errorf("saving manifest for image %q: %w", img.ID, err)
@@ -845,7 +851,10 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
         }
     }
     for instanceDigest, signatures := range s.signatureses {
-        key := signatureBigDataKey(instanceDigest)
+        key, err := signatureBigDataKey(instanceDigest)
+        if err != nil {
+            return err
+        }
         if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, signatures, manifest.Digest); err != nil {
             logrus.Debugf("error saving signatures for image %q: %v", img.ID, err)
             return fmt.Errorf("saving signatures for image %q: %w", img.ID, err)
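In tryReusingBlobAsPending the same two checks used to run only after the destination lock had been taken and other lookups had already happened; the hunks above move them to the top, so an empty or malformed digest is rejected before it is used as a map key or handed to Encoded()/Hex(). A rough standalone sketch of that ordering follows; blobCache stands in for the real storageImageDestination and is not part of c/image.

// blobcache_sketch.go: reject bad digests before any lookup uses them.
package main

import (
	"errors"
	"fmt"

	"github.com/opencontainers/go-digest"
)

type blobCache struct {
	fileSizes map[digest.Digest]int64 // sizes of blobs already cached locally
}

// hasBlob checks the cache, but only after the digest has been validated.
func (c *blobCache) hasBlob(d digest.Digest) (bool, int64, error) {
	if d == "" {
		return false, -1, errors.New("can not check for a blob with unknown digest")
	}
	if err := d.Validate(); err != nil {
		return false, -1, fmt.Errorf("can not check for a blob with invalid digest: %w", err)
	}
	size, ok := c.fileSizes[d]
	return ok, size, nil
}

func main() {
	c := &blobCache{fileSizes: map[digest.Digest]int64{}}
	d := digest.FromString("layer")
	c.fileSizes[d] = 1234

	if ok, size, err := c.hasBlob(d); err == nil && ok {
		fmt.Println("cached size:", size)
	}
	if _, _, err := c.hasBlob("not-a-digest"); err != nil {
		fmt.Println("rejected:", err)
	}
}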
vendor/github.com/containers/image/v5/storage/storage_image.go (14 changed lines; generated, vendored)

@@ -26,14 +26,20 @@ type storageImageCloser struct {
 // manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
 // If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
 // for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey
-func manifestBigDataKey(digest digest.Digest) string {
-    return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String()
+func manifestBigDataKey(digest digest.Digest) (string, error) {
+    if err := digest.Validate(); err != nil { // Make sure info.Digest.String() uses the expected format and does not collide with other BigData keys.
+        return "", err
+    }
+    return storage.ImageDigestManifestBigDataNamePrefix + "-" + digest.String(), nil
 }
 
 // signatureBigDataKey returns a key suitable for recording the signatures associated with the manifest with the specified digest using storage.Store.ImageBigData and related functions.
 // If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
-func signatureBigDataKey(digest digest.Digest) string {
-    return "signature-" + digest.Encoded()
+func signatureBigDataKey(digest digest.Digest) (string, error) {
+    if err := digest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+        return "", err
+    }
+    return "signature-" + digest.Encoded(), nil
 }
 
 // Size() returns the previously-computed size of the image, with no error.
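The two helpers above define the key convention used with storage.Store.ImageBigData: a manifest is stored under "<prefix>-<algorithm>:<hex>" and signatures under "signature-<hex>". After this change both validate the digest and return (string, error) rather than panicking or emitting a key built from unchecked input, and every caller in the other hunks now propagates that error. A simplified sketch of the helpers, where the local prefix constant merely stands in for storage.ImageDigestManifestBigDataNamePrefix from containers/storage:

// bigdata_keys_sketch.go: the key convention with up-front digest validation.
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

const manifestBigDataNamePrefix = "manifest" // placeholder for the c/storage constant

func manifestBigDataKey(d digest.Digest) (string, error) {
	if err := d.Validate(); err != nil { // keep malformed digests out of the BigData key namespace
		return "", err
	}
	return manifestBigDataNamePrefix + "-" + d.String(), nil
}

func signatureBigDataKey(d digest.Digest) (string, error) {
	if err := d.Validate(); err != nil { // Encoded() would panic on a digest without ":"
		return "", err
	}
	return "signature-" + d.Encoded(), nil
}

func main() {
	d := digest.FromString("example manifest")

	mk, err := manifestBigDataKey(d)
	if err != nil {
		panic(err)
	}
	sk, err := signatureBigDataKey(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(mk) // manifest-sha256:<hex>
	fmt.Println(sk) // signature-<hex>
}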
vendor/github.com/containers/image/v5/storage/storage_reference.go (10 changed lines; generated, vendored)

@@ -73,7 +73,10 @@ func multiArchImageMatchesSystemContext(store storage.Store, img *storage.Image,
     // We don't need to care about storage.ImageDigestBigDataKey because
     // manifests lists are only stored into storage by c/image versions
     // that know about manifestBigDataKey, and only using that key.
-    key := manifestBigDataKey(manifestDigest)
+    key, err := manifestBigDataKey(manifestDigest)
+    if err != nil {
+        return false // This should never happen, manifestDigest comes from a reference.Digested, and that validates the format.
+    }
     manifestBytes, err := store.ImageBigData(img.ID, key)
     if err != nil {
         return false
@@ -95,7 +98,10 @@ func multiArchImageMatchesSystemContext(store storage.Store, img *storage.Image,
     if err != nil {
         return false
     }
-    key = manifestBigDataKey(chosenInstance)
+    key, err = manifestBigDataKey(chosenInstance)
+    if err != nil {
+        return false
+    }
     _, err = store.ImageBigData(img.ID, key)
     return err == nil // true if img.ID is based on chosenInstance.
 }
vendor/github.com/containers/image/v5/storage/storage_src.go (19 changed lines; generated, vendored)

@@ -188,7 +188,10 @@ func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []st
 // GetManifest() reads the image's manifest.
 func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) {
     if instanceDigest != nil {
-        key := manifestBigDataKey(*instanceDigest)
+        key, err := manifestBigDataKey(*instanceDigest)
+        if err != nil {
+            return nil, "", err
+        }
         blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
         if err != nil {
             return nil, "", fmt.Errorf("reading manifest for image instance %q: %w", *instanceDigest, err)
@@ -200,7 +203,10 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
     // Prefer the manifest corresponding to the user-specified digest, if available.
     if s.imageRef.named != nil {
         if digested, ok := s.imageRef.named.(reference.Digested); ok {
-            key := manifestBigDataKey(digested.Digest())
+            key, err := manifestBigDataKey(digested.Digest())
+            if err != nil {
+                return nil, "", err
+            }
             blob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, key)
             if err != nil && !os.IsNotExist(err) { // os.IsNotExist is true if the image exists but there is no data corresponding to key
                 return nil, "", err
@@ -315,7 +321,14 @@ func (s *storageImageSource) GetSignaturesWithFormat(ctx context.Context, instan
     instance := "default instance"
     if instanceDigest != nil {
         signatureSizes = s.SignaturesSizes[*instanceDigest]
-        key = signatureBigDataKey(*instanceDigest)
+        k, err := signatureBigDataKey(*instanceDigest)
+        if err != nil {
+            return nil, err
+        }
+        key = k
+        if err := instanceDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, so validate explicitly.
+            return nil, err
+        }
         instance = instanceDigest.Encoded()
     }
     if len(signatureSizes) > 0 {
vendor/github.com/containers/image/v5/version/version.go (2 changed lines; generated, vendored)

@@ -8,7 +8,7 @@ const (
     // VersionMinor is for functionality in a backwards-compatible manner
     VersionMinor = 24
     // VersionPatch is for backwards-compatible bug fixes
-    VersionPatch = 1
+    VersionPatch = 3
 
     // VersionDev indicates development branch. Releases will be empty string.
     VersionDev = ""
vendor/modules.txt (4 changed lines; vendored)

@@ -50,7 +50,7 @@ github.com/containerd/cgroups/stats/v1
 ## explicit; go 1.16
 github.com/containerd/stargz-snapshotter/estargz
 github.com/containerd/stargz-snapshotter/estargz/errorutil
-# github.com/containers/common v0.51.0
+# github.com/containers/common v0.51.4
 ## explicit; go 1.17
 github.com/containers/common/pkg/auth
 github.com/containers/common/pkg/capabilities
@@ -59,7 +59,7 @@ github.com/containers/common/pkg/flag
 github.com/containers/common/pkg/report
 github.com/containers/common/pkg/report/camelcase
 github.com/containers/common/pkg/retry
-# github.com/containers/image/v5 v5.24.1
+# github.com/containers/image/v5 v5.24.3
 ## explicit; go 1.17
 github.com/containers/image/v5/copy
 github.com/containers/image/v5/directory