Mirror of https://github.com/containers/skopeo.git, synced 2025-09-07 17:54:09 +00:00

fix(deps): update module github.com/containers/image/v5 to v5.32.0
Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>

vendor/github.com/containers/image/v5/copy/compression.go (generated, vendored; 10 changed lines)

@@ -73,7 +73,7 @@ type bpCompressionStepData struct {
 	operation              bpcOperation                // What we are actually doing
 	uploadedOperation      types.LayerCompression      // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
 	uploadedAlgorithm      *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
-	uploadedAnnotations    map[string]string           // Annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
+	uploadedAnnotations    map[string]string           // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
 	srcCompressorName      string                      // Compressor name to record in the blob info cache for the source blob.
 	uploadedCompressorName string                      // Compressor name to record in the blob info cache for the uploaded blob.
 	closers                []io.Closer                 // Objects to close after the upload is done, if any.
@@ -323,7 +323,11 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
 			return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
 		}
 	}
-	if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
+	if d.srcCompressorName == "" || d.uploadedCompressorName == "" {
+		return fmt.Errorf("internal error: missing compressor names (src: %q, uploaded: %q)",
+			d.srcCompressorName, d.uploadedCompressorName)
+	}
+	if d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
 		if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
 			// HACK: Don’t record zstd:chunked algorithms.
 			// There is already a similar hack in internal/imagedestination/impl/helpers.CandidateMatchesTryReusingBlobOptions,
@@ -337,7 +341,7 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
 		}
 	}
 	if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest &&
-		d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression {
+		d.srcCompressorName != internalblobinfocache.UnknownCompression {
 		if d.srcCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
 			// HACK: Don’t record zstd:chunked algorithms, see above.
 			c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
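
Note: the net effect of the two hunks above is that empty compressor names now fail fast with an explicit internal error instead of being silently skipped, which lets the later checks drop their `!= ""` halves. A minimal standalone sketch of the reordered guard, with a stand-in constant for internalblobinfocache.UnknownCompression:

package main

import "fmt"

// unknownCompression stands in for internalblobinfocache.UnknownCompression.
const unknownCompression = "unknown"

// recordCompressorNames mirrors the reordered logic in recordValidatedDigestData:
// validate that both names are present first, then skip only the "unknown" marker.
func recordCompressorNames(srcName, uploadedName string) error {
    if srcName == "" || uploadedName == "" {
        return fmt.Errorf("internal error: missing compressor names (src: %q, uploaded: %q)", srcName, uploadedName)
    }
    if uploadedName != unknownCompression {
        fmt.Printf("recording uploaded compressor %q\n", uploadedName)
    }
    if srcName != unknownCompression {
        fmt.Printf("recording source compressor %q\n", srcName)
    }
    return nil
}

func main() {
    fmt.Println(recordCompressorNames("gzip", "zstd")) // records both, prints <nil>
    fmt.Println(recordCompressorNames("gzip", ""))     // now an explicit internal error
}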

vendor/github.com/containers/image/v5/copy/single.go (generated, vendored; 14 changed lines)

@@ -409,7 +409,6 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context,
 // copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "".
 func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algorithm, error) {
 	srcInfos := ic.src.LayerInfos()
-	numLayers := len(srcInfos)
 	updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
 	if err != nil {
 		return nil, err
@@ -440,7 +439,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
 	// copyGroup is used to determine if all layers are copied
 	copyGroup := sync.WaitGroup{}

-	data := make([]copyLayerData, numLayers)
+	data := make([]copyLayerData, len(srcInfos))
 	copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) {
 		defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
 		defer copyGroup.Done()
@@ -463,9 +462,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor

 	// Decide which layers to encrypt
 	layersToEncrypt := set.New[int]()
-	var encryptAll bool
 	if ic.c.options.OciEncryptLayers != nil {
-		encryptAll = len(*ic.c.options.OciEncryptLayers) == 0
 		totalLayers := len(srcInfos)
 		for _, l := range *ic.c.options.OciEncryptLayers {
 			switch {
@@ -478,7 +475,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
 			}
 		}

-		if encryptAll {
+		if len(*ic.c.options.OciEncryptLayers) == 0 { // “encrypt all layers”
 			for i := 0; i < len(srcInfos); i++ {
 				layersToEncrypt.Add(i)
 			}
@@ -493,8 +490,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
 	defer copyGroup.Wait()

 	for i, srcLayer := range srcInfos {
-		err = ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1)
-		if err != nil {
+		if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
 			// This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
 			return fmt.Errorf("copying layer: %w", err)
 		}
@@ -509,8 +505,8 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
 	}

 	compressionAlgos := set.New[string]()
-	destInfos := make([]types.BlobInfo, numLayers)
-	diffIDs := make([]digest.Digest, numLayers)
+	destInfos := make([]types.BlobInfo, len(srcInfos))
+	diffIDs := make([]digest.Digest, len(srcInfos))
 	for i, cld := range data {
 		if cld.err != nil {
 			return nil, cld.err
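
Note: a plausible motivation for retiring the cached numLayers is that srcInfos is reassigned further down when LayerInfosForCopy substitutes layers, so a length captured up front can go stale; computing len(srcInfos) at each use cannot. A minimal sketch of the hazard (names illustrative, not the real copy pipeline):

package main

import "fmt"

func main() {
    srcInfos := []string{"layer-a", "layer-b"}
    numLayers := len(srcInfos) // cached early, like the removed code

    // LayerInfosForCopy may substitute a different set of layers.
    srcInfos = []string{"layer-a", "layer-b", "layer-c"}

    stale := make([]string, numLayers)     // len 2: silently too short
    fresh := make([]string, len(srcInfos)) // len 3: always in sync
    fmt.Println(len(stale), len(fresh))    // 2 3
}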

vendor/github.com/containers/image/v5/docker/docker_client.go (generated, vendored; 76 changed lines)

@@ -86,11 +86,9 @@ type extensionSignatureList struct {
 	Signatures []extensionSignature `json:"signatures"`
 }

+// bearerToken records a cached token we can use to authenticate.
 type bearerToken struct {
-	Token          string    `json:"token"`
-	AccessToken    string    `json:"access_token"`
-	ExpiresIn      int       `json:"expires_in"`
-	IssuedAt       time.Time `json:"issued_at"`
+	token          string
 	expirationTime time.Time
 }

@@ -147,25 +145,6 @@ const (
 	noAuth
 )

-func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
-	token := new(bearerToken)
-	if err := json.Unmarshal(blob, &token); err != nil {
-		return nil, err
-	}
-	if token.Token == "" {
-		token.Token = token.AccessToken
-	}
-	if token.ExpiresIn < minimumTokenLifetimeSeconds {
-		token.ExpiresIn = minimumTokenLifetimeSeconds
-		logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
-	}
-	if token.IssuedAt.IsZero() {
-		token.IssuedAt = time.Now().UTC()
-	}
-	token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
-	return token, nil
-}
-
 // dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort.
 func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
 	if sys != nil && sys.DockerCertPath != "" {
@@ -774,7 +753,7 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope
 			token = *t
 			c.tokenCache.Store(cacheKey, token)
 		}
-		registryToken = token.Token
+		registryToken = token.token
 	}
 	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", registryToken))
 	return nil
@@ -827,12 +806,7 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall
 		return nil, err
 	}

-	tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
-	if err != nil {
-		return nil, err
-	}
-
-	return newBearerTokenFromJSONBlob(tokenBlob)
+	return newBearerTokenFromHTTPResponseBody(res)
 }

 func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
@@ -878,12 +852,50 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge,
 	if err := httpResponseToError(res, "Requesting bearer token"); err != nil {
 		return nil, err
 	}
-	tokenBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
-	if err != nil {
-		return nil, err
-	}

-	return newBearerTokenFromJSONBlob(tokenBlob)
+	return newBearerTokenFromHTTPResponseBody(res)
+}
+
+// newBearerTokenFromHTTPResponseBody parses a http.Response to obtain a bearerToken.
+// The caller is still responsible for ensuring res.Body is closed.
+func newBearerTokenFromHTTPResponseBody(res *http.Response) (*bearerToken, error) {
+	blob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize)
+	if err != nil {
+		return nil, err
+	}
+
+	var token struct {
+		Token          string    `json:"token"`
+		AccessToken    string    `json:"access_token"`
+		ExpiresIn      int       `json:"expires_in"`
+		IssuedAt       time.Time `json:"issued_at"`
+		expirationTime time.Time
+	}
+	if err := json.Unmarshal(blob, &token); err != nil {
+		const bodySampleLength = 50
+		bodySample := blob
+		if len(bodySample) > bodySampleLength {
+			bodySample = bodySample[:bodySampleLength]
+		}
+		return nil, fmt.Errorf("decoding bearer token (last URL %q, body start %q): %w", res.Request.URL.Redacted(), string(bodySample), err)
+	}
+
+	bt := &bearerToken{
+		token: token.Token,
+	}
+	if bt.token == "" {
+		bt.token = token.AccessToken
+	}
+
+	if token.ExpiresIn < minimumTokenLifetimeSeconds {
+		token.ExpiresIn = minimumTokenLifetimeSeconds
+		logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
+	}
+	if token.IssuedAt.IsZero() {
+		token.IssuedAt = time.Now().UTC()
+	}
+	bt.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
+	return bt, nil
+}

 // detectPropertiesHelper performs the work of detectProperties which executes
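
Note: the refactor narrows the cached state to just the private token and expirationTime fields, parsing the wire-format fields into a throwaway struct and improving the decode error with the request URL and a body sample. A condensed sketch of the parse-then-normalize pattern in isolation (it omits the unused expirationTime field that the vendored wire struct still carries, and the error decoration):

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// minimumTokenLifetimeSeconds mirrors the floor the client applies;
// the real constant lives in docker_client.go.
const minimumTokenLifetimeSeconds = 60

type bearerToken struct {
    token          string
    expirationTime time.Time
}

func parseToken(blob []byte) (*bearerToken, error) {
    // Wire fields live in a throwaway struct; only the normalized result is kept.
    var token struct {
        Token       string    `json:"token"`
        AccessToken string    `json:"access_token"`
        ExpiresIn   int       `json:"expires_in"`
        IssuedAt    time.Time `json:"issued_at"`
    }
    if err := json.Unmarshal(blob, &token); err != nil {
        return nil, err
    }
    bt := &bearerToken{token: token.Token}
    if bt.token == "" {
        bt.token = token.AccessToken // some registries use access_token instead of token
    }
    if token.ExpiresIn < minimumTokenLifetimeSeconds {
        token.ExpiresIn = minimumTokenLifetimeSeconds
    }
    if token.IssuedAt.IsZero() {
        token.IssuedAt = time.Now().UTC()
    }
    bt.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
    return bt, nil
}

func main() {
    bt, err := parseToken([]byte(`{"access_token":"abc","expires_in":300}`))
    fmt.Println(bt.token, bt.expirationTime.After(time.Now()), err) // abc true <nil>
}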

vendor/github.com/containers/image/v5/docker/docker_image_dest.go (generated, vendored; 2 changed lines)

@@ -361,8 +361,6 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 				logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
 				continue
 			}
-		}
-		if !candidate.UnknownLocation {
 			if candidate.CompressionAlgorithm != nil {
 				logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressionAlgorithm.Name(), candidateRepo.Name())
 			} else {

vendor/github.com/containers/image/v5/docker/docker_image_src.go (generated, vendored; 31 changed lines)

@@ -1,7 +1,9 @@
 package docker

 import (
+	"bytes"
 	"context"
+	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
@@ -11,6 +13,7 @@ import (
 	"net/http"
 	"net/url"
 	"os"
+	"os/exec"
 	"strings"
 	"sync"

@@ -162,6 +165,34 @@ func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logica
 		client.Close()
 		return nil, err
 	}

+	if h, err := sysregistriesv2.AdditionalLayerStoreAuthHelper(endpointSys); err == nil && h != "" {
+		acf := map[string]struct {
+			Username      string `json:"username,omitempty"`
+			Password      string `json:"password,omitempty"`
+			IdentityToken string `json:"identityToken,omitempty"`
+		}{
+			physicalRef.ref.String(): {
+				Username:      client.auth.Username,
+				Password:      client.auth.Password,
+				IdentityToken: client.auth.IdentityToken,
+			},
+		}
+		acfD, err := json.Marshal(acf)
+		if err != nil {
+			logrus.Warnf("failed to marshal auth config: %v", err)
+		} else {
+			cmd := exec.Command(h)
+			cmd.Stdin = bytes.NewReader(acfD)
+			if err := cmd.Run(); err != nil {
+				var stderr string
+				if ee, ok := err.(*exec.ExitError); ok {
+					stderr = string(ee.Stderr)
+				}
+				logrus.Warnf("Failed to call additional-layer-store-auth-helper (stderr:%s): %v", stderr, err)
+			}
+		}
+	}
 	return s, nil
 }
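
Note: the helper protocol is implicit in the hunk above: the binary configured as additional-layer-store-auth-helper is executed once, with a JSON map keyed by image reference written to its stdin. A hypothetical helper consuming that input might look like this (the struct shape is taken from the diff; everything else is illustrative):

package main

import (
    "encoding/json"
    "fmt"
    "os"
)

// credentials matches the JSON the client writes to the helper's stdin.
type credentials struct {
    Username      string `json:"username,omitempty"`
    Password      string `json:"password,omitempty"`
    IdentityToken string `json:"identityToken,omitempty"`
}

func main() {
    // One entry per image reference, e.g. "registry.example.com/repo:tag".
    var authConfig map[string]credentials
    if err := json.NewDecoder(os.Stdin).Decode(&authConfig); err != nil {
        fmt.Fprintf(os.Stderr, "decoding auth config: %v\n", err)
        os.Exit(1) // a non-zero exit is logged as a warning by the caller
    }
    for ref, cred := range authConfig {
        // A real helper would hand these to the Additional Layer Store;
        // here we only acknowledge receipt without echoing secrets.
        fmt.Fprintf(os.Stderr, "received credentials for %s (user %q)\n", ref, cred.Username)
    }
}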

vendor/github.com/containers/image/v5/internal/manifest/oci_index.go (generated, vendored; 55 changed lines)

@@ -1,6 +1,7 @@
 package manifest

 import (
+	"bytes"
 	"encoding/json"
 	"fmt"
 	"maps"
@@ -296,29 +297,51 @@ func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotation
 		},
 	}
 	for i, component := range components {
-		var platform *imgspecv1.Platform
-		if component.Platform != nil {
-			platformCopy := ociPlatformClone(*component.Platform)
-			platform = &platformCopy
-		}
-		m := imgspecv1.Descriptor{
-			MediaType:    component.MediaType,
-			ArtifactType: component.ArtifactType,
-			Size:         component.Size,
-			Digest:       component.Digest,
-			URLs:         slices.Clone(component.URLs),
-			Annotations:  maps.Clone(component.Annotations),
-			Platform:     platform,
-		}
-		index.Manifests[i] = m
+		index.Manifests[i] = oci1DescriptorClone(component)
 	}
 	return &index
 }

+func oci1DescriptorClone(d imgspecv1.Descriptor) imgspecv1.Descriptor {
+	var platform *imgspecv1.Platform
+	if d.Platform != nil {
+		platformCopy := ociPlatformClone(*d.Platform)
+		platform = &platformCopy
+	}
+	return imgspecv1.Descriptor{
+		MediaType:    d.MediaType,
+		Digest:       d.Digest,
+		Size:         d.Size,
+		URLs:         slices.Clone(d.URLs),
+		Annotations:  maps.Clone(d.Annotations),
+		Data:         bytes.Clone(d.Data),
+		Platform:     platform,
+		ArtifactType: d.ArtifactType,
+	}
+}
+
 // OCI1IndexPublicClone creates a deep copy of the passed-in index.
 // This is publicly visible as c/image/manifest.OCI1IndexClone.
 func OCI1IndexPublicClone(index *OCI1IndexPublic) *OCI1IndexPublic {
-	return OCI1IndexPublicFromComponents(index.Manifests, index.Annotations)
+	var subject *imgspecv1.Descriptor
+	if index.Subject != nil {
+		s := oci1DescriptorClone(*index.Subject)
+		subject = &s
+	}
+	manifests := make([]imgspecv1.Descriptor, len(index.Manifests))
+	for i, m := range index.Manifests {
+		manifests[i] = oci1DescriptorClone(m)
+	}
+	return &OCI1IndexPublic{
+		Index: imgspecv1.Index{
+			Versioned:    index.Versioned,
+			MediaType:    index.MediaType,
+			ArtifactType: index.ArtifactType,
+			Manifests:    manifests,
+			Subject:      subject,
+			Annotations:  maps.Clone(index.Annotations),
+		},
+	}
+}

 // ToOCI1Index returns the index encoded as an OCI1 index.
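
Note: oci1DescriptorClone exists because imgspecv1.Descriptor contains reference types (the URLs slice, the Annotations map, the Data bytes), so a plain struct copy still shares backing storage with the original; the rewritten OCI1IndexPublicClone also carries over Subject and ArtifactType, which the old FromComponents round-trip dropped. A small self-contained sketch of the aliasing problem the deep clone avoids:

package main

import (
    "fmt"
    "maps"
)

type descriptor struct {
    MediaType   string
    Annotations map[string]string
}

func main() {
    orig := descriptor{
        MediaType:   "application/vnd.oci.image.manifest.v1+json",
        Annotations: map[string]string{"org.opencontainers.image.ref.name": "v1"},
    }

    shallow := orig // struct copy: the Annotations map is shared
    deep := descriptor{
        MediaType:   orig.MediaType,
        Annotations: maps.Clone(orig.Annotations), // what the clone helper does for maps
    }

    shallow.Annotations["org.opencontainers.image.ref.name"] = "v2"
    fmt.Println(orig.Annotations["org.opencontainers.image.ref.name"]) // "v2": mutated through the alias
    fmt.Println(deep.Annotations["org.opencontainers.image.ref.name"]) // "v1": unaffected
}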

vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go (generated, vendored; 12 changed lines)

@@ -74,3 +74,15 @@ func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
 	logrus.Debugf("Using SQLite blob info cache at %s", path)
 	return cache
 }
+
+// CleanupDefaultCache removes the blob info cache directory.
+// It deletes the cache directory but it does not affect any file or memory buffer currently
+// in use.
+func CleanupDefaultCache(sys *types.SystemContext) error {
+	dir, err := blobInfoCacheDir(sys, rootless.GetRootlessEUID())
+	if err != nil {
+		// Mirror the DefaultCache behavior that does not fail in this case
+		return nil
+	}
+	return os.RemoveAll(dir)
+}
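
Note: CleanupDefaultCache is new public API in v5.32.0, taking the same SystemContext that selects the cache location for DefaultCache. A minimal usage sketch, assuming the containers/image module is available as it is in this vendor tree:

package main

import (
    "fmt"

    "github.com/containers/image/v5/pkg/blobinfocache"
)

func main() {
    // A nil SystemContext selects the default (per-user) cache location.
    cache := blobinfocache.DefaultCache(nil)
    _ = cache // ... use the cache for copy operations ...

    // Later, drop the on-disk cache directory entirely.
    if err := blobinfocache.CleanupDefaultCache(nil); err != nil {
        fmt.Println("cleaning blob info cache:", err)
    }
}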

vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go (generated, vendored; 2 changed lines)

@@ -27,7 +27,7 @@ type cache struct {
 	uncompressedDigests   map[digest.Digest]digest.Digest
 	digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest]                // stores a set of digests for each uncompressed digest
 	knownLocations        map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
-	compressors           map[digest.Digest]string                                 // stores a compressor name, or blobinfocache.Unknown (not blobinfocache.UnknownCompression), for each digest
+	compressors           map[digest.Digest]string                                 // stores a compressor name, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression), for each digest
 }

 // New returns a BlobInfoCache implementation which is in-memory only.

vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go (generated, vendored; 20 changed lines)

@@ -248,6 +248,11 @@ type V2RegistriesConf struct {
 	// potentially use all unqualified-search registries
 	ShortNameMode string `toml:"short-name-mode"`

+	// AdditionalLayerStoreAuthHelper is a helper binary that receives
+	// registry credentials pass them to Additional Layer Store for
+	// registry authentication. These credentials are only collected when pulling (not pushing).
+	AdditionalLayerStoreAuthHelper string `toml:"additional-layer-store-auth-helper"`
+
 	shortNameAliasConf

 	// If you add any field, make sure to update Nonempty() below.
@@ -825,6 +830,16 @@ func CredentialHelpers(sys *types.SystemContext) ([]string, error) {
 	return config.partialV2.CredentialHelpers, nil
 }

+// AdditionalLayerStoreAuthHelper returns the helper for passing registry
+// credentials to Additional Layer Store.
+func AdditionalLayerStoreAuthHelper(sys *types.SystemContext) (string, error) {
+	config, err := getConfig(sys)
+	if err != nil {
+		return "", err
+	}
+	return config.partialV2.AdditionalLayerStoreAuthHelper, nil
+}
+
 // refMatchingSubdomainPrefix returns the length of ref
 // iff ref, which is a registry, repository namespace, repository or image reference (as formatted by
 // reference.Domain(), reference.Named.Name() or reference.Reference.String()
@@ -1051,6 +1066,11 @@ func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) {
 		c.shortNameMode = updates.shortNameMode
 	}

+	// == Merge AdditionalLayerStoreAuthHelper:
+	if updates.partialV2.AdditionalLayerStoreAuthHelper != "" {
+		c.partialV2.AdditionalLayerStoreAuthHelper = updates.partialV2.AdditionalLayerStoreAuthHelper
+	}
+
 	// == Merge aliasCache:
 	// We don’t maintain (in fact we actively clear) c.partialV2.shortNameAliasConf.
 	c.aliasCache.updateWithConfigurationFrom(updates.aliasCache)
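
Note: for operators, the new knob is a plain top-level string in registries.conf, judging from the toml tag above. A hypothetical configuration (the helper path is illustrative):

# /etc/containers/registries.conf
additional-layer-store-auth-helper = "/usr/libexec/my-als-auth-helper"

Consumers read the merged value through the new sysregistriesv2.AdditionalLayerStoreAuthHelper(sys) accessor, as docker_image_src.go does above.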

vendor/github.com/containers/image/v5/signature/mechanism_openpgp.go (generated, vendored; 3 changed lines)

@@ -15,6 +15,7 @@ import (

+	"github.com/containers/image/v5/signature/internal"
 	"github.com/containers/storage/pkg/homedir"

 	// This is a fallback code; the primary recommendation is to use the gpgme mechanism
 	// implementation, which is out-of-process and more appropriate for handling long-term private key material
 	// than any Go implementation.
@@ -150,7 +151,7 @@ func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents [
 		return nil, "", fmt.Errorf("signature error: %v", md.SignatureError)
 	}
 	if md.SignedBy == nil {
-		return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Invalid GPG signature: %#v", md.Signature))
+		return nil, "", internal.NewInvalidSignatureError(fmt.Sprintf("Key not found for key ID %x in signature", md.SignedByKeyId))
 	}
 	if md.Signature != nil {
 		if md.Signature.SigLifetimeSecs != nil {

vendor/github.com/containers/image/v5/storage/storage_dest.go (generated, vendored; 6 changed lines)

@@ -325,7 +325,13 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
 	if out.UncompressedDigest != "" {
+		// The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is
+		// responsible for ensuring blobDigest has been validated.
+		if out.CompressedDigest != blobDigest {
+			return private.UploadedBlob{}, fmt.Errorf("internal error: ApplyDiffWithDiffer returned CompressedDigest %q not matching expected %q",
+				out.CompressedDigest, blobDigest)
+		}
 		s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
 		// We trust ApplyDiffWithDiffer to validate or create both values correctly.
 		options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest)
 	} else {
 		// Don’t identify layers by TOC if UncompressedDigest is available.
 		// - Using UncompressedDigest allows image reuse with non-partially-pulled layers
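
Note: the added guard cross-checks the digest the differ reports against the digest the caller expects before the compressed/uncompressed pair is recorded in the blob info cache. A tiny sketch of that kind of digest cross-check, using the opencontainers/go-digest package that c/image builds on:

package main

import (
    "fmt"

    "github.com/opencontainers/go-digest"
)

func main() {
    blob := []byte("layer contents")
    expected := digest.FromBytes(blob) // what the caller was told it is pulling

    // Pretend a differ re-derived the digest while consuming the stream.
    reported := digest.FromBytes(blob)

    if reported != expected {
        fmt.Printf("internal error: returned digest %q not matching expected %q\n", reported, expected)
        return
    }
    fmt.Println("digests agree:", expected)
}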

vendor/github.com/containers/image/v5/version/version.go (generated, vendored; 4 changed lines)

@@ -6,9 +6,9 @@ const (
 	// VersionMajor is for an API incompatible changes
 	VersionMajor = 5
 	// VersionMinor is for functionality in a backwards-compatible manner
-	VersionMinor = 31
+	VersionMinor = 32
 	// VersionPatch is for backwards-compatible bug fixes
-	VersionPatch = 1
+	VersionPatch = 0

 	// VersionDev indicates development branch. Releases will be empty string.
 	VersionDev = ""
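
Note: putting the components together, the vendored library now reports itself as 5.32.0, up from 5.31.1. A sketch of assembling the version string from these constants (the exact Version variable layout in version.go is assumed, not shown in this diff):

package main

import "fmt"

const (
    VersionMajor = 5
    VersionMinor = 32
    VersionPatch = 0
    VersionDev   = "" // non-empty only on development branches
)

func main() {
    fmt.Printf("%d.%d.%d%s\n", VersionMajor, VersionMinor, VersionPatch, VersionDev) // 5.32.0
}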