Mirror of https://github.com/containers/skopeo.git, synced 2025-09-01 22:58:24 +00:00
Update c/image after https://github.com/containers/image/pull/2273
This is necessary so that c/image tests can pass.

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
vendor/github.com/containers/image/v5/copy/compression.go (generated, vendored): 75 changed lines
@@ -70,7 +70,8 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
 
 // bpCompressionStepData contains data that the copy pipeline needs about the compression step.
 type bpCompressionStepData struct {
-    operation types.LayerCompression // Operation to use for updating the blob metadata.
+    operation bpcOperation // What we are actually doing
+    uploadedOperation types.LayerCompression // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
     uploadedAlgorithm *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
     uploadedAnnotations map[string]string // Annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
     srcCompressorName string // Compressor name to record in the blob info cache for the source blob.
@@ -78,6 +79,18 @@ type bpCompressionStepData struct {
     closers []io.Closer // Objects to close after the upload is done, if any.
 }
 
+type bpcOperation int
+
+const (
+    bpcOpInvalid bpcOperation = iota
+    bpcOpPreserveOpaque       // We are preserving something where compression is not applicable
+    bpcOpPreserveCompressed   // We are preserving a compressed, and decompressible, layer
+    bpcOpPreserveUncompressed // We are preserving an uncompressed, and compressible, layer
+    bpcOpCompressUncompressed // We are compressing uncompressed data
+    bpcOpRecompressCompressed // We are recompressing compressed data
+    bpcOpDecompressCompressed // We are decompressing compressed data
+)
+
 // blobPipelineCompressionStep updates *stream to compress and/or decompress it.
 // srcInfo is primarily used for error messages.
 // Returns data for other steps; the caller should eventually call updateCompressionEdits and perhaps recordValidatedBlobData,
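Note (not part of the diff): the struct now distinguishes what the pipeline actually does (operation, a bpcOperation) from what should be recorded in the updated blob metadata (uploadedOperation, a types.LayerCompression). A minimal sketch of that correspondence, written as a hypothetical helper in the same copy package (uploadedOperationFor is not a function in this patch; the mapping is read off the hunks below):

    func uploadedOperationFor(op bpcOperation) types.LayerCompression {
        switch op {
        case bpcOpPreserveOpaque, bpcOpPreserveCompressed, bpcOpRecompressCompressed:
            // The blob keeps (or regains) its original compressed/uncompressed state.
            return types.PreserveOriginal
        case bpcOpPreserveUncompressed, bpcOpDecompressCompressed:
            // The uploaded blob is uncompressed, so the metadata edits record Decompress.
            return types.Decompress
        case bpcOpCompressUncompressed:
            return types.Compress
        default: // bpcOpInvalid
            return types.PreserveOriginal
        }
    }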
@@ -112,10 +125,11 @@ func (ic *imageCopier) blobPipelineCompressionStep(stream *sourceStream, canModi
 // bpcPreserveEncrypted checks if the input is encrypted, and returns a *bpCompressionStepData if so.
 func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectCompressionStepData) (*bpCompressionStepData, error) {
     if isOciEncrypted(stream.info.MediaType) {
+        // We can’t do anything with an encrypted blob unless decrypted.
         logrus.Debugf("Using original blob without modification for encrypted blob")
-        // PreserveOriginal due to any compression not being able to be done on an encrypted blob unless decrypted
         return &bpCompressionStepData{
-            operation: types.PreserveOriginal,
+            operation: bpcOpPreserveOpaque,
+            uploadedOperation: types.PreserveOriginal,
             uploadedAlgorithm: nil,
             srcCompressorName: internalblobinfocache.UnknownCompression,
             uploadedCompressorName: internalblobinfocache.UnknownCompression,
@@ -143,7 +157,8 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp
         Size: -1,
     }
     return &bpCompressionStepData{
-        operation: types.Compress,
+        operation: bpcOpCompressUncompressed,
+        uploadedOperation: types.Compress,
         uploadedAlgorithm: uploadedAlgorithm,
         uploadedAnnotations: annotations,
         srcCompressorName: detected.srcCompressorName,
@@ -182,7 +197,8 @@ func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bp
     }
     succeeded = true
     return &bpCompressionStepData{
-        operation: types.PreserveOriginal,
+        operation: bpcOpRecompressCompressed,
+        uploadedOperation: types.PreserveOriginal,
         uploadedAlgorithm: ic.compressionFormat,
         uploadedAnnotations: annotations,
         srcCompressorName: detected.srcCompressorName,
@@ -208,7 +224,8 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp
         Size: -1,
     }
     return &bpCompressionStepData{
-        operation: types.Decompress,
+        operation: bpcOpDecompressCompressed,
+        uploadedOperation: types.Decompress,
         uploadedAlgorithm: nil,
         srcCompressorName: detected.srcCompressorName,
         uploadedCompressorName: internalblobinfocache.Uncompressed,
@@ -232,14 +249,26 @@ func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCom
     // But don’t touch blobs in objects where we can’t change compression,
     // so that src.UpdatedImage() doesn’t fail; assume that for such blobs
     // LayerInfosForCopy() should not be making any changes in the first place.
+    var bpcOp bpcOperation
+    var uploadedOp types.LayerCompression
     var algorithm *compressiontypes.Algorithm
-    if layerCompressionChangeSupported && detected.isCompressed {
+    switch {
+    case !layerCompressionChangeSupported:
+        bpcOp = bpcOpPreserveOpaque
+        uploadedOp = types.PreserveOriginal
+        algorithm = nil
+    case detected.isCompressed:
+        bpcOp = bpcOpPreserveCompressed
+        uploadedOp = types.PreserveOriginal
         algorithm = &detected.format
-    } else {
+    default:
+        bpcOp = bpcOpPreserveUncompressed
+        uploadedOp = types.Decompress
         algorithm = nil
     }
     return &bpCompressionStepData{
-        operation: types.PreserveOriginal,
+        operation: bpcOp,
+        uploadedOperation: uploadedOp,
         uploadedAlgorithm: algorithm,
         srcCompressorName: detected.srcCompressorName,
         uploadedCompressorName: detected.srcCompressorName,
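Note (not part of the diff): the new switch spells out the three preserve variants; summarizing the resulting field values:

    // !layerCompressionChangeSupported                           -> bpcOpPreserveOpaque,       uploadedOp = types.PreserveOriginal, algorithm = nil
    // layerCompressionChangeSupported && detected.isCompressed   -> bpcOpPreserveCompressed,   uploadedOp = types.PreserveOriginal, algorithm = &detected.format
    // layerCompressionChangeSupported && !detected.isCompressed  -> bpcOpPreserveUncompressed, uploadedOp = types.Decompress,       algorithm = nil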
@@ -248,7 +277,7 @@ func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCom
 
 // updateCompressionEdits sets *operation, *algorithm and updates *annotations, if necessary.
 func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCompression, algorithm **compressiontypes.Algorithm, annotations *map[string]string) {
-    *operation = d.operation
+    *operation = d.uploadedOperation
     // If we can modify the layer's blob, set the desired algorithm for it to be set in the manifest.
     *algorithm = d.uploadedAlgorithm
     if *annotations == nil {
@@ -257,7 +286,8 @@ func (d *bpCompressionStepData) updateCompressionEdits(operation *types.LayerCom
     maps.Copy(*annotations, d.uploadedAnnotations)
 }
 
-// recordValidatedBlobData updates b.blobInfoCache with data about the created uploadedInfo adnd the original srcInfo.
+// recordValidatedBlobData updates b.blobInfoCache with data about the created uploadedInfo (as returned by PutBlob)
+// and the original srcInfo (which the caller guarantees has been validated).
 // This must ONLY be called if all data has been validated by OUR code, and is not coming from third parties.
 func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInfo types.BlobInfo, srcInfo types.BlobInfo,
     encryptionStep *bpEncryptionStepData, decryptionStep *bpDecryptionStepData) error {
@@ -268,17 +298,26 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
     // in the blob info cache (which would probably be necessary for any more complex logic),
     // and the simplicity is attractive.
     if !encryptionStep.encrypting && !decryptionStep.decrypting {
-        // If d.operation != types.PreserveOriginal, we now have two reliable digest values:
+        // If d.operation != bpcOpPreserve*, we now have two reliable digest values:
         // srcinfo.Digest describes the pre-d.operation input, verified by digestingReader
         // uploadedInfo.Digest describes the post-d.operation output, computed by PutBlob
-        // (because stream.info.Digest == "", this must have been computed afresh).
+        // (because we set stream.info.Digest == "", this must have been computed afresh).
         switch d.operation {
-        case types.PreserveOriginal:
-            break // Do nothing, we have only one digest and we might not have even verified it.
-        case types.Compress:
+        case bpcOpPreserveOpaque:
+            // No useful information
+        case bpcOpCompressUncompressed:
             c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
-        case types.Decompress:
+        case bpcOpDecompressCompressed:
             c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
+        case bpcOpRecompressCompressed, bpcOpPreserveCompressed:
+            // We know one or two compressed digests. BlobInfoCache associates compression variants via the uncompressed digest,
+            // and we don’t know that one.
+            // That also means that repeated copies with the same recompression don’t identify reuse opportunities (unless
+            // RecordDigestUncompressedPair was called for both compressed variants for some other reason).
+        case bpcOpPreserveUncompressed:
+            c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, srcInfo.Digest)
+        case bpcOpInvalid:
+            fallthrough
         default:
             return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
         }
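Note (not part of the diff): a rough sketch of what the recorded pairs buy us on a later copy, assuming the types.BlobInfoCache interface used above; cache and the digest variables are placeholders:

    // After compressing an uncompressed layer (bpcOpCompressUncompressed), the call above is effectively:
    cache.RecordDigestUncompressedPair(compressedUploadDigest, uncompressedSrcDigest)
    // After decompressing a compressed layer (bpcOpDecompressCompressed), it is:
    cache.RecordDigestUncompressedPair(compressedSrcDigest, uncompressedUploadDigest)
    // Either way the cache can later map a compressed digest back to its uncompressed one,
    // which is what enables substituting equivalent blobs on future copies:
    _ = cache.UncompressedDigest(compressedUploadDigest) // expected to equal uncompressedSrcDigest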
@@ -286,7 +325,7 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
     if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
         if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
             // HACK: Don’t record zstd:chunked algorithms.
-            // There is already a similar hack in internal/imagedestination/impl/helpers.BlobMatchesRequiredCompression,
+            // There is already a similar hack in internal/imagedestination/impl/helpers.CandidateMatchesTryReusingBlobOptions,
             // and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless.
             //
             // We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate
vendor/github.com/containers/image/v5/copy/manifest.go (generated, vendored): 11 changed lines
@@ -82,18 +82,11 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
     if in.forceManifestMIMEType != "" {
         destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType}
     }
-
-    restrictiveCompressionRequired := in.requestedCompressionFormat != nil && !internalManifest.CompressionAlgorithmIsUniversallySupported(*in.requestedCompressionFormat)
     if len(destSupportedManifestMIMETypes) == 0 {
-        if (!in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType)) &&
-            (!restrictiveCompressionRequired || internalManifest.MIMETypeSupportsCompressionAlgorithm(srcType, *in.requestedCompressionFormat)) {
-            return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions.
-                preferredMIMEType: srcType,
-                otherMIMETypeCandidates: []string{},
-            }, nil
-        }
         destSupportedManifestMIMETypes = allManifestMIMETypes
     }
+
+    restrictiveCompressionRequired := in.requestedCompressionFormat != nil && !internalManifest.CompressionAlgorithmIsUniversallySupported(*in.requestedCompressionFormat)
     supportedByDest := set.New[string]()
     for _, t := range destSupportedManifestMIMETypes {
         if in.requiresOCIEncryption && !manifest.MIMETypeSupportsEncryption(t) {
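Note (not part of the diff): with the early return gone, every candidate manifest type now flows through the same filtering loop shown above. The shape of that filter as a generic, self-contained sketch; the names and helper predicates here are illustrative, not the c/image API:

    package main

    import "fmt"

    // keepSupported filters candidate manifest MIME types by the caller's requirements.
    func keepSupported(candidates []string, needEncryption, needRestrictiveCompression bool,
        supportsEncryption, supportsCompression func(string) bool) []string {
        var out []string
        for _, t := range candidates {
            if needEncryption && !supportsEncryption(t) {
                continue
            }
            if needRestrictiveCompression && !supportsCompression(t) {
                continue
            }
            out = append(out, t)
        }
        return out
    }

    func main() {
        candidates := []string{
            "application/vnd.oci.image.manifest.v1+json",
            "application/vnd.docker.distribution.manifest.v2+json",
        }
        // Pretend zstd compression was requested: only the OCI manifest format can express zstd layers.
        zstdOK := func(mt string) bool { return mt == "application/vnd.oci.image.manifest.v1+json" }
        anyEnc := func(string) bool { return true }
        fmt.Println(keepSupported(candidates, false, true, anyEnc, zstdOK))
        // Output: [application/vnd.oci.image.manifest.v1+json]
    }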
vendor/github.com/containers/image/v5/copy/single.go (generated, vendored): 66 changed lines
@@ -33,6 +33,7 @@ type imageCopier struct {
     c *copier
     manifestUpdates *types.ManifestUpdateOptions
     src *image.SourcedImage
+    manifestConversionPlan manifestConversionPlan
     diffIDsAreNeeded bool
     cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
     canSubstituteBlobs bool
@@ -136,7 +137,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
     c: c,
     manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
     src: src,
-    // diffIDsAreNeeded is computed later
+    // manifestConversionPlan and diffIDsAreNeeded are computed later
     cannotModifyManifestReason: cannotModifyManifestReason,
     requireCompressionFormatMatch: opts.requireCompressionFormatMatch,
 }
@@ -164,7 +165,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
 
     destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig == nil) || c.options.OciEncryptLayers != nil
 
-    manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{
+    ic.manifestConversionPlan, err = determineManifestConversion(determineManifestConversionInputs{
         srcMIMEType: ic.src.ManifestMIMEType,
         destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(),
         forceManifestMIMEType: c.options.ForceManifestMIMEType,
@@ -179,8 +180,8 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
     // code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code
     // (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based
     // on the expected destination format.
-    if manifestConversionPlan.preferredMIMETypeNeedsConversion {
-        ic.manifestUpdates.ManifestMIMEType = manifestConversionPlan.preferredMIMEType
+    if ic.manifestConversionPlan.preferredMIMETypeNeedsConversion {
+        ic.manifestUpdates.ManifestMIMEType = ic.manifestConversionPlan.preferredMIMEType
     }
 
     // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
@@ -219,11 +220,11 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
     manifestBytes, manifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
     wipResult := copySingleImageResult{
         manifest: manifestBytes,
-        manifestMIMEType: manifestConversionPlan.preferredMIMEType,
+        manifestMIMEType: ic.manifestConversionPlan.preferredMIMEType,
         manifestDigest: manifestDigest,
     }
     if err != nil {
-        logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err)
+        logrus.Debugf("Writing manifest using preferred type %s failed: %v", ic.manifestConversionPlan.preferredMIMEType, err)
         // … if it fails, and the failure is either because the manifest is rejected by the registry, or
         // because we failed to create a manifest of the specified type because the specific manifest type
         // doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may
@@ -232,13 +233,13 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
     var manifestLayerCompressionIncompatibilityError manifest.ManifestLayerCompressionIncompatibilityError
     isManifestRejected := errors.As(err, &manifestTypeRejectedError)
     isCompressionIncompatible := errors.As(err, &manifestLayerCompressionIncompatibilityError)
-    if (!isManifestRejected && !isCompressionIncompatible) || len(manifestConversionPlan.otherMIMETypeCandidates) == 0 {
+    if (!isManifestRejected && !isCompressionIncompatible) || len(ic.manifestConversionPlan.otherMIMETypeCandidates) == 0 {
         // We don’t have other options.
         // In principle the code below would handle this as well, but the resulting error message is fairly ugly.
         // Don’t bother the user with MIME types if we have no choice.
         return copySingleImageResult{}, err
     }
-    // If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType.
+    // If the original MIME type is acceptable, determineManifestConversion always uses it as ic.manifestConversionPlan.preferredMIMEType.
     // So if we are here, we will definitely be trying to convert the manifest.
     // With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason,
     // so let’s bail out early and with a better error message.
@@ -247,8 +248,8 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
     }
 
     // errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
-    errs := []string{fmt.Sprintf("%s(%v)", manifestConversionPlan.preferredMIMEType, err)}
-    for _, manifestMIMEType := range manifestConversionPlan.otherMIMETypeCandidates {
+    errs := []string{fmt.Sprintf("%s(%v)", ic.manifestConversionPlan.preferredMIMEType, err)}
+    for _, manifestMIMEType := range ic.manifestConversionPlan.otherMIMETypeCandidates {
         logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
         ic.manifestUpdates.ManifestMIMEType = manifestMIMEType
         attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
@@ -382,7 +383,7 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context,
 
     compressionAlgos := set.New[string]()
     for _, srcInfo := range ic.src.LayerInfos() {
-        if c := compressionAlgorithmFromMIMEType(srcInfo); c != nil {
+        if _, c := compressionEditsFromMIMEType(srcInfo); c != nil {
             compressionAlgos.Add(c.Name())
         }
     }
@@ -635,17 +636,22 @@ type diffIDResult struct {
     err error
 }
 
-func compressionAlgorithmFromMIMEType(srcInfo types.BlobInfo) *compressiontypes.Algorithm {
+// compressionEditsFromMIMEType returns a (CompressionOperation, CompressionAlgorithm) value pair suitable
+// for types.BlobInfo, based on a MIME type of srcInfo.
+func compressionEditsFromMIMEType(srcInfo types.BlobInfo) (types.LayerCompression, *compressiontypes.Algorithm) {
     // This MIME type → compression mapping belongs in manifest-specific code in our manifest
     // package (but we should preferably replace/change UpdatedImage instead of productizing
     // this workaround).
     switch srcInfo.MediaType {
     case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
-        return &compression.Gzip
+        return types.PreserveOriginal, &compression.Gzip
     case imgspecv1.MediaTypeImageLayerZstd:
-        return &compression.Zstd
+        return types.PreserveOriginal, &compression.Zstd
+    case manifest.DockerV2SchemaLayerMediaTypeUncompressed, imgspecv1.MediaTypeImageLayer:
+        return types.Decompress, nil
+    default:
+        return types.PreserveOriginal, nil
     }
-    return nil
 }
 
 // copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
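Note (not part of the diff): compressionEditsFromMIMEType now reports an operation in addition to an algorithm, so uncompressed layer media types are explicitly mapped to Decompress instead of being indistinguishable from "unknown". The mapping above, spelled out with the concrete media type strings behind those constants:

    // application/vnd.docker.image.rootfs.diff.tar.gzip,
    // application/vnd.oci.image.layer.v1.tar+gzip   -> types.PreserveOriginal, &compression.Gzip
    // application/vnd.oci.image.layer.v1.tar+zstd   -> types.PreserveOriginal, &compression.Zstd
    // application/vnd.docker.image.rootfs.diff.tar,
    // application/vnd.oci.image.layer.v1.tar        -> types.Decompress, nil
    // any other media type                          -> types.PreserveOriginal, nil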
@@ -659,8 +665,8 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
     // which uses the compression information to compute the updated MediaType values.
     // (Sadly UpdatedImage() is documented to not update MediaTypes from
     // ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
-    if srcInfo.CompressionAlgorithm == nil {
-        srcInfo.CompressionAlgorithm = compressionAlgorithmFromMIMEType(srcInfo)
+    if srcInfo.CompressionOperation == types.PreserveOriginal && srcInfo.CompressionAlgorithm == nil {
+        srcInfo.CompressionOperation, srcInfo.CompressionAlgorithm = compressionEditsFromMIMEType(srcInfo)
     }
 
     ic.c.printCopyInfo("blob", srcInfo)
@@ -683,17 +689,10 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
     logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
         srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
     canSubstitute := ic.canSubstituteBlobs && ic.src.CanChangeLayerCompression(srcInfo.MediaType)
-    // TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
-    // that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
-    // a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
-    // a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
-    // Fixing that will probably require passing more information to TryReusingBlob() than the current version of
-    // the ImageDestination interface lets us pass in.
+
     var requiredCompression *compressiontypes.Algorithm
-    var originalCompression *compressiontypes.Algorithm
     if ic.requireCompressionFormatMatch {
         requiredCompression = ic.compressionFormat
-        originalCompression = srcInfo.CompressionAlgorithm
     }
 
     // Check if we have a chunked layer in storage that's based on that blob. These layers are stored by their TOC digest.
@@ -703,14 +702,15 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
     }
 
     reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
-        Cache:               ic.c.blobInfoCache,
-        CanSubstitute:       canSubstitute,
-        EmptyLayer:          emptyLayer,
-        LayerIndex:          &layerIndex,
-        SrcRef:              srcRef,
-        RequiredCompression: requiredCompression,
-        OriginalCompression: originalCompression,
-        TOCDigest:           tocDigest,
+        Cache:                   ic.c.blobInfoCache,
+        CanSubstitute:           canSubstitute,
+        EmptyLayer:              emptyLayer,
+        LayerIndex:              &layerIndex,
+        SrcRef:                  srcRef,
+        PossibleManifestFormats: append([]string{ic.manifestConversionPlan.preferredMIMEType}, ic.manifestConversionPlan.otherMIMETypeCandidates...),
+        RequiredCompression:     requiredCompression,
+        OriginalCompression:     srcInfo.CompressionAlgorithm,
+        TOCDigest:               tocDigest,
     })
     if err != nil {
         return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
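Note (not part of the diff): PossibleManifestFormats is simply the preferred manifest type followed by the fallback candidates, so a destination can tell whether at least one format the copy may still write can express a candidate blob's compression. For example (values illustrative):

    formats := append([]string{ic.manifestConversionPlan.preferredMIMEType},
        ic.manifestConversionPlan.otherMIMETypeCandidates...)
    // e.g. formats == []string{
    //     "application/vnd.oci.image.manifest.v1+json",
    //     "application/vnd.docker.distribution.manifest.v2+json",
    // }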
vendor/github.com/containers/image/v5/directory/directory_dest.go (generated, vendored): 2 changed lines
@@ -190,7 +190,7 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 // If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
-    if !impl.OriginalBlobMatchesRequiredCompression(options) {
+    if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
         return false, private.ReusedBlob{}, nil
     }
     if info.Digest == "" {
vendor/github.com/containers/image/v5/docker/docker_image_dest.go (generated, vendored): 29 changed lines
@@ -27,6 +27,7 @@ import (
     "github.com/containers/image/v5/internal/uploadreader"
     "github.com/containers/image/v5/manifest"
     "github.com/containers/image/v5/pkg/blobinfocache/none"
+    compressiontypes "github.com/containers/image/v5/pkg/compression/types"
     "github.com/containers/image/v5/types"
     "github.com/docker/distribution/registry/api/errcode"
     v2 "github.com/docker/distribution/registry/api/v2"
@@ -311,6 +312,13 @@ func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info t
     return false, private.ReusedBlob{}, nil
 }
 
+func optionalCompressionName(algo *compressiontypes.Algorithm) string {
+    if algo != nil {
+        return algo.Name()
+    }
+    return "nil"
+}
+
 // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
 // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
 // info.Digest must not be empty.
@@ -321,7 +329,7 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
     return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
 }
 
-    if impl.OriginalBlobMatchesRequiredCompression(options) {
+    if impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
         // First, check whether the blob happens to already exist at the destination.
         haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
         if err != nil {
@@ -331,11 +339,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
             return true, reusedInfo, nil
         }
     } else {
-        requiredCompression := "nil"
-        if options.OriginalCompression != nil {
-            requiredCompression = options.OriginalCompression.Name()
-        }
-        logrus.Debugf("Ignoring exact blob match case due to compression mismatch ( %s vs %s )", options.RequiredCompression.Name(), requiredCompression)
+        logrus.Debugf("Ignoring exact blob match, compression %s does not match required %s or MIME types %#v",
+            optionalCompressionName(options.OriginalCompression), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
     }
 
     // Then try reusing blobs from other locations.
@@ -355,15 +360,13 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
                 continue
             }
         }
-        if !impl.BlobMatchesRequiredCompression(options, compressionAlgorithm) {
-            requiredCompression := "nil"
-            if compressionAlgorithm != nil {
-                requiredCompression = compressionAlgorithm.Name()
-            }
+        if !impl.CandidateMatchesTryReusingBlobOptions(options, compressionAlgorithm) {
             if !candidate.UnknownLocation {
-                logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name())
+                logrus.Debugf("Ignoring candidate blob %s in %s, compression %s does not match required %s or MIME types %#v", candidate.Digest.String(), candidateRepo.Name(),
+                    optionalCompressionName(compressionAlgorithm), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
             } else {
-                logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) with no location match, checking current repo", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression)
+                logrus.Debugf("Ignoring candidate blob %s with no known location, compression %s does not match required %s or MIME types %#v", candidate.Digest.String(),
+                    optionalCompressionName(compressionAlgorithm), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
             }
             continue
         }
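Note (not part of the diff): optionalCompressionName only exists to keep these debug messages nil-safe; the old log calls assumed options.RequiredCompression was non-nil, which no longer holds now that a candidate can also be rejected for manifest-format reasons. In short:

    // optionalCompressionName(nil)  == "nil"
    // optionalCompressionName(algo) == algo.Name() for any non-nil *compressiontypes.Algorithm, e.g. "gzip" or "zstd"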
vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go (generated, vendored): 2 changed lines
@@ -129,7 +129,7 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader,
 // If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
-    if !impl.OriginalBlobMatchesRequiredCompression(options) {
+    if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
         return false, private.ReusedBlob{}, nil
     }
     if err := d.archive.lock(); err != nil {
vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go (generated, vendored): 38 changed lines
@@ -1,25 +1,41 @@
 package impl
 
 import (
+    "github.com/containers/image/v5/internal/manifest"
     "github.com/containers/image/v5/internal/private"
     compression "github.com/containers/image/v5/pkg/compression/types"
+    "golang.org/x/exp/slices"
 )
 
-// BlobMatchesRequiredCompression validates if compression is required by the caller while selecting a blob, if it is required
+// CandidateMatchesTryReusingBlobOptions validates if compression is required by the caller while selecting a blob, if it is required
 // then function performs a match against the compression requested by the caller and compression of existing blob
 // (which can be nil to represent uncompressed or unknown)
-func BlobMatchesRequiredCompression(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool {
-    if options.RequiredCompression == nil {
-        return true // no requirement imposed
+func CandidateMatchesTryReusingBlobOptions(options private.TryReusingBlobOptions, candidateCompression *compression.Algorithm) bool {
+    if options.RequiredCompression != nil {
+        if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName {
+            // HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
+            // The caller must re-compress to build those annotations.
+            return false
+        }
+        if candidateCompression == nil || (options.RequiredCompression.Name() != candidateCompression.Name()) {
+            return false
+        }
     }
-    if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName {
-        // HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
-        // The caller must re-compress to build those annotations.
-        return false
+
+    // For candidateCompression == nil, we can’t tell the difference between “uncompressed” and “unknown”;
+    // and “uncompressed” is acceptable in all known formats (well, it seems to work in practice for schema1),
+    // so don’t impose any restrictions if candidateCompression == nil
+    if options.PossibleManifestFormats != nil && candidateCompression != nil {
+        if !slices.ContainsFunc(options.PossibleManifestFormats, func(mt string) bool {
+            return manifest.MIMETypeSupportsCompressionAlgorithm(mt, *candidateCompression)
+        }) {
+            return false
+        }
     }
-    return candidateCompression != nil && (options.RequiredCompression.Name() == candidateCompression.Name())
+
+    return true
 }
 
-func OriginalBlobMatchesRequiredCompression(opts private.TryReusingBlobOptions) bool {
-    return BlobMatchesRequiredCompression(opts, opts.OriginalCompression)
+func OriginalCandidateMatchesTryReusingBlobOptions(opts private.TryReusingBlobOptions) bool {
+    return CandidateMatchesTryReusingBlobOptions(opts, opts.OriginalCompression)
 }
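Note (not part of the diff): the rewritten helper enforces two independent checks, an explicit RequiredCompression match and compatibility with at least one possible manifest format. A few illustrative outcomes (not an exhaustive table):

    // RequiredCompression=zstd,         candidate=gzip                                 -> false (explicit mismatch)
    // RequiredCompression=zstd:chunked, candidate=zstd:chunked                         -> false (HACK: chunked annotations are not recorded)
    // RequiredCompression=nil,          candidate=zstd, PossibleManifestFormats=[v2s2] -> false (Docker schema2 cannot express zstd layers)
    // RequiredCompression=nil,          candidate=nil (uncompressed or unknown)        -> true  (no restriction can be applied)
    // RequiredCompression=nil,          candidate=gzip, PossibleManifestFormats=nil    -> true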
vendor/github.com/containers/image/v5/internal/private/private.go (generated, vendored): 13 changed lines
@@ -112,12 +112,13 @@ type TryReusingBlobOptions struct {
     // Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers
     // if they use internal/imagedestination/impl.Compat;
     // in that case, they will all be consistently zero-valued.
-    RequiredCompression *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go
-    OriginalCompression *compression.Algorithm // Must be set if RequiredCompression is set; can be set to nil to indicate “uncompressed” or “unknown”.
-    EmptyLayer          bool                   // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
-    LayerIndex          *int                   // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
-    SrcRef              reference.Named        // A reference to the source image that contains the input blob.
-    TOCDigest           *digest.Digest         // If specified, the blob can be looked up in the destination also by its TOC digest.
+    EmptyLayer              bool                   // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
+    LayerIndex              *int                   // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
+    SrcRef                  reference.Named        // A reference to the source image that contains the input blob.
+    PossibleManifestFormats []string               // A set of possible manifest formats; at least one should support the reused layer blob.
+    RequiredCompression     *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go
+    OriginalCompression     *compression.Algorithm // May be nil to indicate “uncompressed” or “unknown”.
+    TOCDigest               *digest.Digest         // If specified, the blob can be looked up in the destination also by its TOC digest.
 }
 
 // ReusedBlob is information about a blob reused in a destination.
vendor/github.com/containers/image/v5/oci/layout/oci_dest.go (generated, vendored): 2 changed lines
@@ -173,7 +173,7 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
 // If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
-    if !impl.OriginalBlobMatchesRequiredCompression(options) {
+    if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
         return false, private.ReusedBlob{}, nil
     }
     if info.Digest == "" {
vendor/github.com/containers/image/v5/ostree/ostree_dest.go (generated, vendored): 2 changed lines
@@ -335,7 +335,7 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
 // reflected in the manifest that will be written.
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
-    if !impl.OriginalBlobMatchesRequiredCompression(options) {
+    if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
         return false, private.ReusedBlob{}, nil
     }
     if d.repo == nil {
vendor/github.com/containers/image/v5/signature/internal/rekor_set.go (generated, vendored): 4 changed lines
@@ -219,6 +219,10 @@ func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unver
     if hashedRekordV001.Data.Hash.Algorithm == nil {
         return time.Time{}, NewInvalidSignatureError(`Missing "data.hash.algorithm" field in hashedrekord`)
     }
+    // FIXME: Rekor 1.3.5 has added SHA-386 and SHA-512 as recognized values.
+    // Eventually we should support them as well; doing that cleanly would require updqating to Rekor 1.3.5, which requires Go 1.21.
+    // Short-term, Cosign (as of 2024-02 and Cosign 2.2.3) only produces and accepts SHA-256, so right now that’s not a compatibility
+    // issue.
     if *hashedRekordV001.Data.Hash.Algorithm != models.HashedrekordV001SchemaDataHashAlgorithmSha256 {
         return time.Time{}, NewInvalidSignatureError(fmt.Sprintf(`Unexpected "data.hash.algorithm" value %#v`, *hashedRekordV001.Data.Hash.Algorithm))
     }
vendor/github.com/containers/image/v5/storage/storage_dest.go (generated, vendored): 2 changed lines
@@ -307,7 +307,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
 // If the blob has been successfully reused, returns (true, info, nil).
 // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
 func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
-    if !impl.OriginalBlobMatchesRequiredCompression(options) {
+    if !impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
         return false, private.ReusedBlob{}, nil
     }
     reused, info, err := s.tryReusingBlobAsPending(blobinfo.Digest, blobinfo.Size, &options)
vendor/github.com/containers/image/v5/types/types.go (generated, vendored): 4 changed lines
@@ -135,8 +135,8 @@ type BlobInfo struct {
     // CompressionOperation is used in Image.UpdateLayerInfos to instruct
     // whether the original layer's "compressed or not" should be preserved,
     // possibly while changing the compression algorithm from one to another,
-    // or if it should be compressed or decompressed. The field defaults to
-    // preserve the original layer's compressedness.
+    // or if it should be changed to compressed or decompressed.
+    // The field defaults to preserve the original layer's compressedness.
     // TODO: To remove together with CryptoOperation in re-design to remove
     // field out of BlobInfo.
     CompressionOperation LayerCompression
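Note (not part of the diff): the reworded comment matches how copy/single.go above now fills both fields from the layer media type; roughly, when these hints reach Image.UpdateLayerInfos:

    // CompressionOperation=types.PreserveOriginal, CompressionAlgorithm=&Gzip -> keep a gzip-compressed layer and its gzip media type
    // CompressionOperation=types.Decompress,       CompressionAlgorithm=nil   -> the layer is uncompressed; use an uncompressed media type
    // CompressionOperation=types.Compress,         CompressionAlgorithm=&Zstd -> the layer is compressed with zstd; use the zstd media type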