Mirror of https://github.com/containers/skopeo.git (synced 2025-06-01 11:15:36 +00:00)

Merge pull request #2394 from containers/renovate/github.com-containers-image-v5-5.x

fix(deps): update module github.com/containers/image/v5 to v5.32.1

This commit is contained in commit d2357b38fa.
go.mod (14 lines changed)

@@ -8,7 +8,7 @@ go 1.21.0
 require (
 	github.com/Masterminds/semver/v3 v3.2.1
 	github.com/containers/common v0.60.0
-	github.com/containers/image/v5 v5.32.0
+	github.com/containers/image/v5 v5.32.1
 	github.com/containers/ocicrypt v1.2.0
 	github.com/containers/storage v1.55.0
 	github.com/docker/distribution v2.8.3+incompatible
@@ -80,7 +80,7 @@ require (
 	github.com/klauspost/pgzip v1.2.6 // indirect
 	github.com/letsencrypt/boulder v0.0.0-20240418210053-89b07f4543e0 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
-	github.com/mattn/go-runewidth v0.0.15 // indirect
+	github.com/mattn/go-runewidth v0.0.16 // indirect
 	github.com/mattn/go-sqlite3 v1.14.22 // indirect
 	github.com/miekg/pkcs11 v1.1.1 // indirect
 	github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
@@ -113,7 +113,7 @@ require (
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/ulikunitz/xz v0.5.12 // indirect
 	github.com/vbatts/tar-split v0.11.5 // indirect
-	github.com/vbauerster/mpb/v8 v8.7.4 // indirect
+	github.com/vbauerster/mpb/v8 v8.7.5 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
@@ -124,13 +124,13 @@ require (
 	go.opentelemetry.io/otel v1.24.0 // indirect
 	go.opentelemetry.io/otel/metric v1.24.0 // indirect
 	go.opentelemetry.io/otel/trace v1.24.0 // indirect
-	golang.org/x/crypto v0.25.0 // indirect
+	golang.org/x/crypto v0.26.0 // indirect
 	golang.org/x/mod v0.18.0 // indirect
 	golang.org/x/net v0.26.0 // indirect
-	golang.org/x/oauth2 v0.21.0 // indirect
-	golang.org/x/sync v0.7.0 // indirect
+	golang.org/x/oauth2 v0.22.0 // indirect
+	golang.org/x/sync v0.8.0 // indirect
 	golang.org/x/sys v0.23.0 // indirect
-	golang.org/x/text v0.16.0 // indirect
+	golang.org/x/text v0.17.0 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect
 	google.golang.org/grpc v1.64.1 // indirect
 	google.golang.org/protobuf v1.33.0 // indirect
go.sum (28 lines changed)

@@ -39,8 +39,8 @@ github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G
 github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
 github.com/containers/common v0.60.0 h1:QMNygqiiit9LU/yqee9Dv0N0oQ+rQq41ElfdOBQtw7w=
 github.com/containers/common v0.60.0/go.mod h1:dtKVe11xkV89tqzRX9s/B0ORjeB2dy5UB46aGjunMn8=
-github.com/containers/image/v5 v5.32.0 h1:yjbweazPfr8xOzQ2hkkYm1A2V0jN96/kES6Gwyxj7hQ=
-github.com/containers/image/v5 v5.32.0/go.mod h1:x5e0RDfGaY6bnQ13gJ2LqbfHvzssfB/y5a8HduGFxJc=
+github.com/containers/image/v5 v5.32.1 h1:fVa7GxRC4BCPGsfSRs4JY12WyeY26SUYQ0NuANaCFrI=
+github.com/containers/image/v5 v5.32.1/go.mod h1:v1l73VeMugfj/QtKI+jhYbwnwFCFnNGckvbST3rQ5Hk=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.2.0 h1:X14EgRK3xNFvJEfI5O4Qn4T3E25ANudSOZz/sirVuPM=
@@ -202,8 +202,8 @@ github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxec
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
-github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
-github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
 github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
 github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
@@ -320,8 +320,8 @@ github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
 github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
 github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
 github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
-github.com/vbauerster/mpb/v8 v8.7.4 h1:p4f16iMfUt3PkAC73SCzAtgtSf8TYDqEbJUT3odPrPo=
-github.com/vbauerster/mpb/v8 v8.7.4/go.mod h1:r1B5k2Ljj5KJFCekfihbiqyV4VaaRTANYmvWA2btufI=
+github.com/vbauerster/mpb/v8 v8.7.5 h1:hUF3zaNsuaBBwzEFoCvfuX3cpesQXZC0Phm/JcHZQ+c=
+github.com/vbauerster/mpb/v8 v8.7.5/go.mod h1:bRCnR7K+mj5WXKsy0NWB6Or+wctYGvVwKn6huwvxKa0=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -371,8 +371,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
-golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
-golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
+golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
+golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY=
 golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
@@ -401,8 +401,8 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
 golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=
-golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA=
+golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -410,8 +410,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
+golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -439,8 +439,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
+golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
+golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
 golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
 golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
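For anyone reproducing this Renovate update locally, the conventional sequence for a vendored Go module like skopeo would be roughly the following (standard Go tooling; any project-specific Makefile target is not shown in this diff):

    go get github.com/containers/image/v5@v5.32.1
    go mod tidy
    go mod vendor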
vendor/github.com/containers/image/v5/copy/compression.go (generated, vendored; 161 lines changed)

@@ -11,6 +11,7 @@ import (
 	"github.com/containers/image/v5/pkg/compression"
 	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
 	"github.com/containers/image/v5/types"
+	chunkedToc "github.com/containers/storage/pkg/chunked/toc"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/sirupsen/logrus"
 )
@@ -34,10 +35,10 @@ var (
 
 // bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step.
 type bpDetectCompressionStepData struct {
-	isCompressed      bool
-	format            compressiontypes.Algorithm        // Valid if isCompressed
-	decompressor      compressiontypes.DecompressorFunc // Valid if isCompressed
-	srcCompressorName string                            // Compressor name to possibly record in the blob info cache for the source blob.
+	isCompressed                 bool
+	format                       compressiontypes.Algorithm        // Valid if isCompressed
+	decompressor                 compressiontypes.DecompressorFunc // Valid if isCompressed
+	srcCompressorBaseVariantName string                            // Compressor name to possibly record in the blob info cache for the source blob.
 }
 
 // blobPipelineDetectCompressionStep updates *stream to detect its current compression format.
@@ -51,15 +52,25 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
 	}
 	stream.reader = reader
 
+	if decompressor != nil && format.Name() == compressiontypes.ZstdAlgorithmName {
+		tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
+		if err != nil {
+			return bpDetectCompressionStepData{}, err
+		}
+		if tocDigest != nil {
+			format = compression.ZstdChunked
+		}
+
+	}
 	res := bpDetectCompressionStepData{
 		isCompressed: decompressor != nil,
 		format:       format,
 		decompressor: decompressor,
 	}
 	if res.isCompressed {
-		res.srcCompressorName = format.Name()
+		res.srcCompressorBaseVariantName = format.BaseVariantName()
 	} else {
-		res.srcCompressorName = internalblobinfocache.Uncompressed
+		res.srcCompressorBaseVariantName = internalblobinfocache.Uncompressed
 	}
 
 	if expectedBaseFormat, known := expectedBaseCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.BaseVariantName() != expectedBaseFormat.Name() {
@@ -70,13 +81,14 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
 
 // bpCompressionStepData contains data that the copy pipeline needs about the compression step.
 type bpCompressionStepData struct {
-	operation              bpcOperation                // What we are actually doing
-	uploadedOperation      types.LayerCompression      // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
-	uploadedAlgorithm      *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
-	uploadedAnnotations    map[string]string           // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
-	srcCompressorName      string                      // Compressor name to record in the blob info cache for the source blob.
-	uploadedCompressorName string                      // Compressor name to record in the blob info cache for the uploaded blob.
-	closers                []io.Closer                 // Objects to close after the upload is done, if any.
+	operation                             bpcOperation                // What we are actually doing
+	uploadedOperation                     types.LayerCompression      // Operation to use for updating the blob metadata (matching the end state, not necessarily what we do)
+	uploadedAlgorithm                     *compressiontypes.Algorithm // An algorithm parameter for the compressionOperation edits.
+	uploadedAnnotations                   map[string]string           // Compression-related annotations that should be set on the uploaded blob. WARNING: This is only set after the srcStream.reader is fully consumed.
+	srcCompressorBaseVariantName          string                      // Compressor base variant name to record in the blob info cache for the source blob.
+	uploadedCompressorBaseVariantName     string                      // Compressor base variant name to record in the blob info cache for the uploaded blob.
+	uploadedCompressorSpecificVariantName string                      // Compressor specific variant name to record in the blob info cache for the uploaded blob.
+	closers                               []io.Closer                 // Objects to close after the upload is done, if any.
 }
 
 type bpcOperation int
@@ -128,11 +140,12 @@ func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectComp
 		// We can’t do anything with an encrypted blob unless decrypted.
 		logrus.Debugf("Using original blob without modification for encrypted blob")
 		return &bpCompressionStepData{
-			operation:              bpcOpPreserveOpaque,
-			uploadedOperation:      types.PreserveOriginal,
-			uploadedAlgorithm:      nil,
-			srcCompressorName:      internalblobinfocache.UnknownCompression,
-			uploadedCompressorName: internalblobinfocache.UnknownCompression,
+			operation:                             bpcOpPreserveOpaque,
+			uploadedOperation:                     types.PreserveOriginal,
+			uploadedAlgorithm:                     nil,
+			srcCompressorBaseVariantName:          internalblobinfocache.UnknownCompression,
+			uploadedCompressorBaseVariantName:     internalblobinfocache.UnknownCompression,
+			uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
 		}, nil
 	}
 	return nil, nil
@@ -156,14 +169,19 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp
 			Digest: "",
 			Size:   -1,
 		}
+		specificVariantName := uploadedAlgorithm.Name()
+		if specificVariantName == uploadedAlgorithm.BaseVariantName() {
+			specificVariantName = internalblobinfocache.UnknownCompression
+		}
 		return &bpCompressionStepData{
-			operation:              bpcOpCompressUncompressed,
-			uploadedOperation:      types.Compress,
-			uploadedAlgorithm:      uploadedAlgorithm,
-			uploadedAnnotations:    annotations,
-			srcCompressorName:      detected.srcCompressorName,
-			uploadedCompressorName: uploadedAlgorithm.Name(),
-			closers:                []io.Closer{reader},
+			operation:                             bpcOpCompressUncompressed,
+			uploadedOperation:                     types.Compress,
+			uploadedAlgorithm:                     uploadedAlgorithm,
+			uploadedAnnotations:                   annotations,
+			srcCompressorBaseVariantName:          detected.srcCompressorBaseVariantName,
+			uploadedCompressorBaseVariantName:     uploadedAlgorithm.BaseVariantName(),
+			uploadedCompressorSpecificVariantName: specificVariantName,
+			closers:                               []io.Closer{reader},
 		}, nil
 	}
 	return nil, nil
@@ -196,15 +214,20 @@ func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bp
 			Digest: "",
 			Size:   -1,
 		}
+		specificVariantName := ic.compressionFormat.Name()
+		if specificVariantName == ic.compressionFormat.BaseVariantName() {
+			specificVariantName = internalblobinfocache.UnknownCompression
+		}
 		succeeded = true
 		return &bpCompressionStepData{
-			operation:              bpcOpRecompressCompressed,
-			uploadedOperation:      types.PreserveOriginal,
-			uploadedAlgorithm:      ic.compressionFormat,
-			uploadedAnnotations:    annotations,
-			srcCompressorName:      detected.srcCompressorName,
-			uploadedCompressorName: ic.compressionFormat.Name(),
-			closers:                []io.Closer{decompressed, recompressed},
+			operation:                             bpcOpRecompressCompressed,
+			uploadedOperation:                     types.PreserveOriginal,
+			uploadedAlgorithm:                     ic.compressionFormat,
+			uploadedAnnotations:                   annotations,
+			srcCompressorBaseVariantName:          detected.srcCompressorBaseVariantName,
+			uploadedCompressorBaseVariantName:     ic.compressionFormat.BaseVariantName(),
+			uploadedCompressorSpecificVariantName: specificVariantName,
+			closers:                               []io.Closer{decompressed, recompressed},
 		}, nil
 	}
 	return nil, nil
@@ -225,12 +248,13 @@ func (ic *imageCopier) bpcDecompressCompressed(stream *sourceStream, detected bp
 			Size:   -1,
 		}
 		return &bpCompressionStepData{
-			operation:              bpcOpDecompressCompressed,
-			uploadedOperation:      types.Decompress,
-			uploadedAlgorithm:      nil,
-			srcCompressorName:      detected.srcCompressorName,
-			uploadedCompressorName: internalblobinfocache.Uncompressed,
-			closers:                []io.Closer{s},
+			operation:                             bpcOpDecompressCompressed,
+			uploadedOperation:                     types.Decompress,
+			uploadedAlgorithm:                     nil,
+			srcCompressorBaseVariantName:          detected.srcCompressorBaseVariantName,
+			uploadedCompressorBaseVariantName:     internalblobinfocache.Uncompressed,
+			uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
+			closers:                               []io.Closer{s},
 		}, nil
 	}
 	return nil, nil
@@ -268,11 +292,15 @@ func (ic *imageCopier) bpcPreserveOriginal(_ *sourceStream, detected bpDetectCom
 		algorithm = nil
 	}
 	return &bpCompressionStepData{
-		operation:              bpcOp,
-		uploadedOperation:      uploadedOp,
-		uploadedAlgorithm:      algorithm,
-		srcCompressorName:      detected.srcCompressorName,
-		uploadedCompressorName: detected.srcCompressorName,
+		operation:                    bpcOp,
+		uploadedOperation:            uploadedOp,
+		uploadedAlgorithm:            algorithm,
+		srcCompressorBaseVariantName: detected.srcCompressorBaseVariantName,
+		// We only record the base variant of the format on upload; we didn’t do anything with
+		// the TOC, we don’t know whether it matches the blob digest, so we don’t want to trigger
+		// reuse of any kind between the blob digest and the TOC digest.
+		uploadedCompressorBaseVariantName:     detected.srcCompressorBaseVariantName,
+		uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
	}
 }
 
@@ -308,6 +336,15 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
 		// No useful information
 	case bpcOpCompressUncompressed:
 		c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
+		if d.uploadedAnnotations != nil {
+			tocDigest, err := chunkedToc.GetTOCDigest(d.uploadedAnnotations)
+			if err != nil {
+				return fmt.Errorf("parsing just-created compression annotations: %w", err)
+			}
+			if tocDigest != nil {
+				c.blobInfoCache.RecordTOCUncompressedPair(*tocDigest, srcInfo.Digest)
+			}
+		}
 	case bpcOpDecompressCompressed:
 		c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
 	case bpcOpRecompressCompressed, bpcOpPreserveCompressed:
@@ -323,29 +360,27 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
 			return fmt.Errorf("Internal error: Unexpected d.operation value %#v", d.operation)
 		}
 	}
-	if d.srcCompressorName == "" || d.uploadedCompressorName == "" {
-		return fmt.Errorf("internal error: missing compressor names (src: %q, uploaded: %q)",
-			d.srcCompressorName, d.uploadedCompressorName)
+	if d.srcCompressorBaseVariantName == "" || d.uploadedCompressorBaseVariantName == "" || d.uploadedCompressorSpecificVariantName == "" {
+		return fmt.Errorf("internal error: missing compressor names (src base: %q, uploaded base: %q, uploaded specific: %q)",
+			d.srcCompressorBaseVariantName, d.uploadedCompressorBaseVariantName, d.uploadedCompressorSpecificVariantName)
 	}
-	if d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
-		if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
-			// HACK: Don’t record zstd:chunked algorithms.
-			// There is already a similar hack in internal/imagedestination/impl/helpers.CandidateMatchesTryReusingBlobOptions,
-			// and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless.
-			//
-			// We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate
-			// between zstd and zstd:chunked; so we could, in varying situations over time, call RecordDigestCompressorName
-			// with the same digest and both ZstdAlgorithmName and ZstdChunkedAlgorithmName , which causes warnings about
-			// inconsistent data to be logged.
-			c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
-		}
+	if d.uploadedCompressorBaseVariantName != internalblobinfocache.UnknownCompression {
+		c.blobInfoCache.RecordDigestCompressorData(uploadedInfo.Digest, internalblobinfocache.DigestCompressorData{
+			BaseVariantCompressor:      d.uploadedCompressorBaseVariantName,
+			SpecificVariantCompressor:  d.uploadedCompressorSpecificVariantName,
+			SpecificVariantAnnotations: d.uploadedAnnotations,
+		})
 	}
 	if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest &&
-		d.srcCompressorName != internalblobinfocache.UnknownCompression {
-		if d.srcCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
-			// HACK: Don’t record zstd:chunked algorithms, see above.
-			c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
-		}
+		d.srcCompressorBaseVariantName != internalblobinfocache.UnknownCompression {
+		// If the source is already using some TOC-dependent variant, we either copied the
+		// blob as is, or perhaps decompressed it; either way we don’t trust the TOC digest,
+		// so record neither the variant name, nor the TOC digest.
+		c.blobInfoCache.RecordDigestCompressorData(srcInfo.Digest, internalblobinfocache.DigestCompressorData{
+			BaseVariantCompressor:      d.srcCompressorBaseVariantName,
+			SpecificVariantCompressor:  internalblobinfocache.UnknownCompression,
+			SpecificVariantAnnotations: nil,
+		})
 	}
 	return nil
 }
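The recurring change in compression.go above is the split of the single recorded compressor name into a base variant (e.g. zstd, shared by zstd and zstd:chunked) and a specific variant. A minimal standalone sketch of that derivation, reusing the Algorithm methods and the UnknownCompression sentinel that appear in the diff (the helper name is made up for illustration):

    // cacheVariantNames mirrors the specificVariantName logic added in
    // bpcCompressUncompressed/bpcRecompressCompressed above.
    func cacheVariantNames(algo compressiontypes.Algorithm) (base, specific string) {
        base = algo.BaseVariantName() // e.g. "zstd" for zstd:chunked
        specific = algo.Name()        // e.g. "zstd:chunked"
        if specific == base {
            // A plain base-variant blob: the diff records UnknownCompression
            // as the specific variant in that case.
            specific = internalblobinfocache.UnknownCompression
        }
        return base, specific
    }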
vendor/github.com/containers/image/v5/copy/encryption.go (generated, vendored; 2 lines changed)

@@ -48,7 +48,7 @@ func (ic *imageCopier) blobPipelineDecryptionStep(stream *sourceStream, srcInfo
 		Annotations: stream.info.Annotations,
 	}
 	// DecryptLayer supposedly returns a digest of the decrypted stream.
-	// In pratice, that value is never set in the current implementation.
+	// In practice, that value is never set in the current implementation.
 	// And we shouldn’t use it anyway, because it is not trusted: encryption can be made to a public key,
 	// i.e. it doesn’t authenticate the origin of the metadata in any way.
 	reader, _, err := ocicrypt.DecryptLayer(ic.c.options.OciDecryptConfig, stream.reader, desc, false)
vendor/github.com/containers/image/v5/copy/progress_bars.go (generated, vendored; 2 lines changed)

@@ -121,7 +121,7 @@ func (c *copier) printCopyInfo(kind string, info types.BlobInfo) {
 	}
 }
 
-// mark100PercentComplete marks the progres bars as 100% complete;
+// mark100PercentComplete marks the progress bars as 100% complete;
 // it may do so by possibly advancing the current state if it is below the known total.
 func (bar *progressBar) mark100PercentComplete() {
 	if bar.originalSize > 0 {
vendor/github.com/containers/image/v5/copy/single.go (generated, vendored; 53 lines changed)

@@ -6,6 +6,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"maps"
 	"reflect"
 	"slices"
 	"strings"
@@ -149,6 +150,28 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
 		ic.compressionFormat = c.options.DestinationCtx.CompressionFormat
 		ic.compressionLevel = c.options.DestinationCtx.CompressionLevel
 	}
+	// HACK: Don’t combine zstd:chunked and encryption.
+	// zstd:chunked can only usefully be consumed using range requests of parts of the layer, which would require the encryption
+	// to support decrypting arbitrary subsets of the stream. That’s plausible but not supported using the encryption API we have.
+	// Also, the chunked metadata is exposed in annotations unencrypted, which reveals the TOC digest = layer identity without
+	// encryption. (That can be determined from the unencrypted config anyway, but, still...)
+	//
+	// Ideally this should query a well-defined property of the compression algorithm (and $somehow determine the right fallback) instead of
+	// hard-coding zstd:chunked / zstd.
+	if ic.c.options.OciEncryptLayers != nil {
+		format := ic.compressionFormat
+		if format == nil {
+			format = defaultCompressionFormat
+		}
+		if format.Name() == compressiontypes.ZstdChunkedAlgorithmName {
+			if ic.requireCompressionFormatMatch {
+				return copySingleImageResult{}, errors.New("explicitly requested to combine zstd:chunked with encryption, which is not beneficial; use plain zstd instead")
+			}
+			logrus.Warnf("Compression using zstd:chunked is not beneficial for encrypted layers, using plain zstd instead")
+			ic.compressionFormat = &compression.Zstd
+		}
+	}
 
 	// Decide whether we can substitute blobs with semantic equivalents:
 	// - Don’t do that if we can’t modify the manifest at all
 	// - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
@@ -192,7 +215,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
 	shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
 	noPendingManifestUpdates := ic.noPendingManifestUpdates()
 
-	logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t, compression match required for resuing blobs=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates, opts.requireCompressionFormatMatch)
+	logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t, compression match required for reusing blobs=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates, opts.requireCompressionFormatMatch)
 	if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates && !ic.requireCompressionFormatMatch {
 		matchedResult, err := ic.compareImageDestinationManifestEqual(ctx, targetInstance)
 		if err != nil {
@@ -866,21 +889,33 @@ func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.Reuse
 	// Handling of compression, encryption, and the related MIME types and the like are all the responsibility
 	// of the generic code in this package.
 	res := types.BlobInfo{
-		Digest:      reusedBlob.Digest,
-		Size:        reusedBlob.Size,
-		URLs:        nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
-		Annotations: inputInfo.Annotations, // FIXME: This should remove zstd:chunked annotations (but those annotations being left with incorrect values should not break pulls)
-		MediaType:   inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
+		Digest: reusedBlob.Digest,
+		Size:   reusedBlob.Size,
+		URLs:   nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
+		// FIXME: This should remove zstd:chunked annotations IF the original was chunked and the new one isn’t
+		// (but those annotations being left with incorrect values should not break pulls).
+		Annotations: maps.Clone(inputInfo.Annotations),
+		MediaType:   inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
 		CompressionOperation: reusedBlob.CompressionOperation,
 		CompressionAlgorithm: reusedBlob.CompressionAlgorithm,
 		CryptoOperation:      inputInfo.CryptoOperation, // Expected to be unset anyway.
 	}
 	// The transport is only expected to fill CompressionOperation and CompressionAlgorithm
-	// if the blob was substituted; otherwise, fill it in based
+	// if the blob was substituted; otherwise, it is optional, and if not set, fill it in based
 	// on what we know from the srcInfos we were given.
 	if reusedBlob.Digest == inputInfo.Digest {
-		res.CompressionOperation = inputInfo.CompressionOperation
-		res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
+		if res.CompressionOperation == types.PreserveOriginal {
+			res.CompressionOperation = inputInfo.CompressionOperation
+		}
+		if res.CompressionAlgorithm == nil {
+			res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
+		}
+	}
+	if len(reusedBlob.CompressionAnnotations) != 0 {
+		if res.Annotations == nil {
+			res.Annotations = map[string]string{}
+		}
+		maps.Copy(res.Annotations, reusedBlob.CompressionAnnotations)
 	}
 	return res
 }
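One detail worth calling out in updatedBlobInfoFromReuse above: the annotations map is now cloned before the reused blob’s compression annotations are merged in, so the caller’s inputInfo is never mutated. A small self-contained sketch of that merge pattern from the standard-library "maps" package (Go 1.21+; the values are hypothetical):

    src := map[string]string{"a": "1", "b": "2"}
    res := maps.Clone(src) // independent copy; maps.Clone(nil) returns nil
    if res == nil {
        res = map[string]string{}
    }
    maps.Copy(res, map[string]string{"b": "3"}) // overwrites "b", keeps "a"; src is untouched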
vendor/github.com/containers/image/v5/docker/docker_image_dest.go (generated, vendored; 22 lines changed)

@@ -332,6 +332,7 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 		return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
 	}
 
+	originalCandidateKnownToBeMissing := false
 	if impl.OriginalCandidateMatchesTryReusingBlobOptions(options) {
 		// First, check whether the blob happens to already exist at the destination.
 		haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
@@ -341,9 +342,17 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 		if haveBlob {
 			return true, reusedInfo, nil
 		}
+		originalCandidateKnownToBeMissing = true
 	} else {
 		logrus.Debugf("Ignoring exact blob match, compression %s does not match required %s or MIME types %#v",
 			optionalCompressionName(options.OriginalCompression), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats)
+		// We can get here with a blob detected to be zstd when the user wants a zstd:chunked.
+		// In that case we keep originalCandiateKnownToBeMissing = false, so that if we find
+		// a BIC entry for this blob, we do use that entry and return a zstd:chunked entry
+		// with the BIC’s annotations.
+		// This is not quite correct, it only works if the BIC also contains an acceptable _location_.
+		// Ideally, we could look up just the compression algorithm/annotations for info.digest,
+		// and use it even if no location candidate exists and the original dandidate is present.
 	}
 
 	// Then try reusing blobs from other locations.
@@ -387,7 +396,8 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 			// for it in the current repo.
 			candidateRepo = reference.TrimNamed(d.ref.ref)
 		}
-		if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
+		if originalCandidateKnownToBeMissing &&
+			candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
 			logrus.Debug("... Already tried the primary destination")
 			continue
 		}
@@ -427,10 +437,12 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
 		options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
 
 		return true, private.ReusedBlob{
-			Digest:               candidate.Digest,
-			Size:                 size,
-			CompressionOperation: candidate.CompressionOperation,
-			CompressionAlgorithm: candidate.CompressionAlgorithm}, nil
+			Digest:                 candidate.Digest,
+			Size:                   size,
+			CompressionOperation:   candidate.CompressionOperation,
+			CompressionAlgorithm:   candidate.CompressionAlgorithm,
+			CompressionAnnotations: candidate.CompressionAnnotations,
+		}, nil
 	}
 
 	return false, private.ReusedBlob{}, nil
vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go (generated, vendored; 9 lines changed)

@@ -27,7 +27,14 @@ func (bic *v1OnlyBlobInfoCache) Open() {
 func (bic *v1OnlyBlobInfoCache) Close() {
 }
 
-func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
+func (bic *v1OnlyBlobInfoCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
+	return ""
+}
+
+func (bic *v1OnlyBlobInfoCache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
+}
+
+func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData) {
 }
 
 func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2 {
vendor/github.com/containers/image/v5/internal/blobinfocache/types.go (generated, vendored; 42 lines changed)

@@ -26,19 +26,40 @@ type BlobInfoCache2 interface {
 	// Close destroys state created by Open().
 	Close()
 
-	// RecordDigestCompressorName records a compressor for the blob with the specified digest,
-	// or Uncompressed or UnknownCompression.
-	// WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a
-	// digest just because some remote author claims so (e.g. because a manifest says so);
+	// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest.
+	// Returns "" if the uncompressed digest is unknown.
+	UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest
+	// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
+	// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+	// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+	// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+	RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest)
+
+	// RecordDigestCompressorData records data for the blob with the specified digest.
+	// WARNING: Only call this with LOCALLY VERIFIED data:
+	// - don’t record a compressor for a digest just because some remote author claims so
+	//   (e.g. because a manifest says so);
+	// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
+	//   and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
+	//   in a manifest)
 	// otherwise the cache could be poisoned and cause us to make incorrect edits to type
 	// information in a manifest.
-	RecordDigestCompressorName(anyDigest digest.Digest, compressorName string)
+	RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData)
 	// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known)
 	// that could possibly be reused within the specified (transport scope) (if they still
 	// exist, which is not guaranteed).
 	CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2
 }
 
+// DigestCompressorData is information known about how a blob is compressed.
+// (This is worded generically, but basically targeted at the zstd / zstd:chunked situation.)
+type DigestCompressorData struct {
+	BaseVariantCompressor string // A compressor’s base variant name, or Uncompressed or UnknownCompression.
+	// The following fields are only valid if the base variant is neither Uncompressed nor UnknownCompression:
+	SpecificVariantCompressor  string            // A non-base variant compressor (or UnknownCompression if the true format is just the base variant)
+	SpecificVariantAnnotations map[string]string // Annotations required to benefit from the base variant.
+}
+
 // CandidateLocations2Options are used in CandidateLocations2.
 type CandidateLocations2Options struct {
 	// If !CanSubstitute, the returned candidates will match the submitted digest exactly; if
@@ -51,9 +72,10 @@ type CandidateLocations2Options struct {
 
 // BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
 type BICReplacementCandidate2 struct {
-	Digest               digest.Digest
-	CompressionOperation types.LayerCompression      // Either types.Decompress for uncompressed, or types.Compress for compressed
-	CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
-	UnknownLocation      bool                        // is true when `Location` for this blob is not set
-	Location             types.BICLocationReference  // not set if UnknownLocation is set to `true`
+	Digest                 digest.Digest
+	CompressionOperation   types.LayerCompression      // Either types.Decompress for uncompressed, or types.Compress for compressed
+	CompressionAlgorithm   *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
+	CompressionAnnotations map[string]string           // If necessary, annotations necessary to use CompressionAlgorithm
+	UnknownLocation        bool                        // is true when `Location` for this blob is not set
+	Location               types.BICLocationReference  // not set if UnknownLocation is set to `true`
 }
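To make the new BlobInfoCache2 contract above concrete, here is a hypothetical caller-side sketch of recording compression data for a locally verified zstd:chunked blob (layerDigest, tocAnnotations and cache are placeholders, not names from the diff):

    data := blobinfocache.DigestCompressorData{
        BaseVariantCompressor:      "zstd",         // always set for compressed blobs
        SpecificVariantCompressor:  "zstd:chunked", // only when it differs from the base variant
        SpecificVariantAnnotations: tocAnnotations, // required to actually consume the specific variant
    }
    cache.RecordDigestCompressorData(layerDigest, data)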
vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go (generated, vendored; 3 lines changed)

@@ -76,6 +76,9 @@ func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.Blob
 		Size:                 blob.Size,
 		CompressionOperation: blob.CompressionOperation,
 		CompressionAlgorithm: blob.CompressionAlgorithm,
+		// CompressionAnnotations could be set to blob.Annotations, but that may contain unrelated
+		// annotations, and we didn’t use the blob.Annotations field previously, so we’ll
+		// continue not using it.
 	}, nil
 }
 
vendor/github.com/containers/image/v5/internal/manifest/manifest.go (generated, vendored; 5 lines changed)

@@ -205,11 +205,6 @@ type ReuseConditions struct {
 // (which can be nil to represent uncompressed or unknown) matches reuseConditions.
 func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool {
 	if c.RequiredCompression != nil {
-		if c.RequiredCompression.Name() == compressiontypes.ZstdChunkedAlgorithmName {
-			// HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
-			// The caller must re-compress to build those annotations.
-			return false
-		}
 		if candidateCompression == nil ||
 			(c.RequiredCompression.Name() != candidateCompression.Name() && c.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
 			return false
vendor/github.com/containers/image/v5/internal/private/private.go (generated, vendored; 5 lines changed)

@@ -134,9 +134,14 @@ type ReusedBlob struct {
 	Size int64 // Must be provided
 	// The following compression fields should be set when the reuse substitutes
 	// a differently-compressed blob.
+	// They may be set also to change from a base variant to a specific variant of an algorithm.
 	CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A
 	CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A
 
+	// Annotations that should be added, for CompressionAlgorithm. Note that they might need to be
+	// added even if the digest doesn’t change (if we found the annotations in a cache).
+	CompressionAnnotations map[string]string
+
 	MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes.
 }

vendor/github.com/containers/image/v5/manifest/docker_schema2.go (generated, vendored; 2 lines changed)

@@ -202,7 +202,7 @@ func (m *Schema2) ConfigInfo() types.BlobInfo {
 // The Digest field is guaranteed to be provided; Size may be -1.
 // WARNING: The list may contain duplicates, and they are semantically relevant.
 func (m *Schema2) LayerInfos() []LayerInfo {
-	blobs := []LayerInfo{}
+	blobs := make([]LayerInfo, 0, len(m.LayersDescriptors))
 	for _, layer := range m.LayersDescriptors {
 		blobs = append(blobs, LayerInfo{
 			BlobInfo: BlobInfoFromSchema2Descriptor(layer),
vendor/github.com/containers/image/v5/manifest/oci.go (generated, vendored; 2 lines changed)

@@ -95,7 +95,7 @@ func (m *OCI1) ConfigInfo() types.BlobInfo {
 // The Digest field is guaranteed to be provided; Size may be -1.
 // WARNING: The list may contain duplicates, and they are semantically relevant.
 func (m *OCI1) LayerInfos() []LayerInfo {
-	blobs := []LayerInfo{}
+	blobs := make([]LayerInfo, 0, len(m.Layers))
 	for _, layer := range m.Layers {
 		blobs = append(blobs, LayerInfo{
 			BlobInfo: BlobInfoFromOCI1Descriptor(layer),
vendor/github.com/containers/image/v5/ostree/ostree_src.go (generated, vendored; 6 lines changed)

@@ -151,9 +151,9 @@ func openRepo(path string) (*C.struct_OstreeRepo, error) {
 	var cerr *C.GError
 	cpath := C.CString(path)
 	defer C.free(unsafe.Pointer(cpath))
-	pathc := C.g_file_new_for_path(cpath)
-	defer C.g_object_unref(C.gpointer(pathc))
-	repo := C.ostree_repo_new(pathc)
+	file := C.g_file_new_for_path(cpath)
+	defer C.g_object_unref(C.gpointer(file))
+	repo := C.ostree_repo_new(file)
 	r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr)))
 	if !r {
 		C.g_object_unref(C.gpointer(repo))
164
vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
generated
vendored
164
vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
generated
vendored
@ -25,57 +25,133 @@ const replacementAttempts = 5
|
||||
// This is a heuristic/guess, and could well use a different value.
|
||||
const replacementUnknownLocationAttempts = 2
|
||||
|
||||
// CandidateCompression returns (true, compressionOp, compressionAlgo) if a blob
|
||||
// with compressionName (which can be Uncompressed or UnknownCompression) is acceptable for a CandidateLocations* call with v2Options.
|
||||
// CandidateTemplate is a subset of BICReplacementCandidate2 with data related to a specific digest,
|
||||
// which can be later combined with information about a location.
|
||||
type CandidateTemplate struct {
|
||||
digest digest.Digest
|
||||
compressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed
|
||||
compressionAlgorithm *compression.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed
|
||||
compressionAnnotations map[string]string // If necessary, annotations necessary to use compressionAlgorithm
|
||||
}
|
||||
|
||||
// CandidateTemplateWithCompression returns a CandidateTemplate if a blob with data is acceptable
|
||||
// for a CandidateLocations* call with v2Options.
|
||||
//
|
||||
// v2Options can be set to nil if the call is CandidateLocations (i.e. compression is not required to be known);
|
||||
// if not nil, the call is assumed to be CandidateLocations2.
|
||||
//
|
||||
// The (compressionOp, compressionAlgo) values are suitable for BICReplacementCandidate2
|
||||
func CandidateCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, compressorName string) (bool, types.LayerCompression, *compression.Algorithm) {
|
||||
func CandidateTemplateWithCompression(v2Options *blobinfocache.CandidateLocations2Options, digest digest.Digest, data blobinfocache.DigestCompressorData) *CandidateTemplate {
|
||||
if v2Options == nil {
|
||||
return true, types.PreserveOriginal, nil // Anything goes. The (compressionOp, compressionAlgo) values are not used.
|
||||
return &CandidateTemplate{ // Anything goes. The compressionOperation, compressionAlgorithm and compressionAnnotations values are not used.
|
||||
digest: digest,
|
||||
}
|
||||
}
|
||||
|
||||
var op types.LayerCompression
|
||||
var algo *compression.Algorithm
|
||||
switch compressorName {
|
||||
requiredCompression := "nil"
|
||||
if v2Options.RequiredCompression != nil {
|
||||
requiredCompression = v2Options.RequiredCompression.Name()
|
||||
}
|
||||
switch data.BaseVariantCompressor {
|
||||
case blobinfocache.Uncompressed:
|
||||
op = types.Decompress
|
||||
algo = nil
|
||||
if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
|
||||
PossibleManifestFormats: v2Options.PossibleManifestFormats,
|
||||
RequiredCompression: v2Options.RequiredCompression,
|
||||
}, nil) {
|
||||
logrus.Debugf("Ignoring BlobInfoCache record of digest %q, uncompressed format does not match required %s or MIME types %#v",
|
||||
digest.String(), requiredCompression, v2Options.PossibleManifestFormats)
|
||||
return nil
|
||||
}
|
||||
return &CandidateTemplate{
|
||||
digest: digest,
|
||||
compressionOperation: types.Decompress,
|
||||
compressionAlgorithm: nil,
|
||||
compressionAnnotations: nil,
|
||||
}
|
||||
case blobinfocache.UnknownCompression:
|
||||
logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unknown compression", digest.String())
|
||||
return false, types.PreserveOriginal, nil // Not allowed with CandidateLocations2
|
||||
return nil // Not allowed with CandidateLocations2
|
||||
default:
|
||||
op = types.Compress
|
||||
algo_, err := compression.AlgorithmByName(compressorName)
|
||||
// See if we can use the specific variant, first.
|
||||
if data.SpecificVariantCompressor != blobinfocache.UnknownCompression {
|
||||
algo, err := compression.AlgorithmByName(data.SpecificVariantCompressor)
|
||||
if err != nil {
|
||||
logrus.Debugf("Not considering unrecognized specific compression variant %q for BlobInfoCache record of digest %q: %v",
|
||||
data.SpecificVariantCompressor, digest.String(), err)
|
||||
} else {
|
||||
if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
|
||||
PossibleManifestFormats: v2Options.PossibleManifestFormats,
|
||||
RequiredCompression: v2Options.RequiredCompression,
|
||||
}, &algo) {
|
||||
logrus.Debugf("Ignoring specific compression variant %q for BlobInfoCache record of digest %q, it does not match required %s or MIME types %#v",
|
||||
data.SpecificVariantCompressor, digest.String(), requiredCompression, v2Options.PossibleManifestFormats)
|
||||
} else {
|
||||
return &CandidateTemplate{
|
||||
digest: digest,
|
||||
compressionOperation: types.Compress,
|
||||
compressionAlgorithm: &algo,
|
||||
compressionAnnotations: data.SpecificVariantAnnotations,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Try the base variant.
|
||||
algo, err := compression.AlgorithmByName(data.BaseVariantCompressor)
|
||||
if err != nil {
|
||||
logrus.Debugf("Ignoring BlobInfoCache record of digest %q with unrecognized compression %q: %v",
|
||||
digest.String(), compressorName, err)
|
||||
return false, types.PreserveOriginal, nil // The BICReplacementCandidate2.CompressionAlgorithm field is required
|
||||
digest.String(), data.BaseVariantCompressor, err)
|
||||
return nil // The BICReplacementCandidate2.CompressionAlgorithm field is required
|
||||
}
|
||||
algo = &algo_
|
||||
}
|
||||
if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
|
||||
PossibleManifestFormats: v2Options.PossibleManifestFormats,
|
||||
RequiredCompression: v2Options.RequiredCompression,
|
||||
}, algo) {
|
||||
requiredCompresssion := "nil"
|
||||
if v2Options.RequiredCompression != nil {
|
||||
requiredCompresssion = v2Options.RequiredCompression.Name()
|
||||
if !manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
|
||||
PossibleManifestFormats: v2Options.PossibleManifestFormats,
|
||||
RequiredCompression: v2Options.RequiredCompression,
|
||||
}, &algo) {
|
||||
logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v",
|
||||
digest.String(), data.BaseVariantCompressor, requiredCompression, v2Options.PossibleManifestFormats)
|
||||
return nil
|
||||
}
|
||||
return &CandidateTemplate{
|
||||
digest: digest,
|
||||
compressionOperation: types.Compress,
|
||||
compressionAlgorithm: &algo,
|
||||
compressionAnnotations: nil,
|
||||
}
|
||||
logrus.Debugf("Ignoring BlobInfoCache record of digest %q, compression %q does not match required %s or MIME types %#v",
|
||||
digest.String(), compressorName, requiredCompresssion, v2Options.PossibleManifestFormats)
|
||||
return false, types.PreserveOriginal, nil
|
||||
}
|
||||
|
||||
return true, op, algo
|
||||
}
|
||||
|
||||
// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
|
||||
type CandidateWithTime struct {
|
||||
Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
|
||||
LastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation)
|
||||
candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
|
||||
lastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation)
|
||||
}
|
||||
|
||||
// CandidateWithLocation returns a complete CandidateWithTime combining (template from CandidateTemplateWithCompression, location, lastSeen)
|
||||
func (template CandidateTemplate) CandidateWithLocation(location types.BICLocationReference, lastSeen time.Time) CandidateWithTime {
|
||||
return CandidateWithTime{
|
||||
candidate: blobinfocache.BICReplacementCandidate2{
|
||||
Digest: template.digest,
|
||||
CompressionOperation: template.compressionOperation,
|
||||
CompressionAlgorithm: template.compressionAlgorithm,
|
||||
CompressionAnnotations: template.compressionAnnotations,
|
||||
UnknownLocation: false,
|
||||
Location: location,
|
||||
},
|
||||
lastSeen: lastSeen,
|
||||
}
|
||||
}
|
||||
|
||||
// CandidateWithUnknownLocation returns a complete CandidateWithTime for a template from CandidateTemplateWithCompression and an unknown location.
|
||||
func (template CandidateTemplate) CandidateWithUnknownLocation() CandidateWithTime {
|
||||
return CandidateWithTime{
|
||||
candidate: blobinfocache.BICReplacementCandidate2{
|
||||
Digest: template.digest,
|
||||
CompressionOperation: template.compressionOperation,
|
||||
CompressionAlgorithm: template.compressionAlgorithm,
|
||||
CompressionAnnotations: template.compressionAnnotations,
|
||||
UnknownLocation: true,
|
||||
Location: types.BICLocationReference{Opaque: ""},
|
||||
},
|
||||
lastSeen: time.Time{},
|
||||
}
|
||||
}

// candidateSortState is a closure for a comparison used by slices.SortFunc on candidates to prioritize,
@@ -91,35 +167,35 @@ func (css *candidateSortState) compare(xi, xj CandidateWithTime) int {
	// Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)

	// First, deal with the primaryDigest/uncompressedDigest cases:
	if xi.Candidate.Digest != xj.Candidate.Digest {
	if xi.candidate.Digest != xj.candidate.Digest {
		// - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
		if xi.Candidate.Digest == css.primaryDigest {
		if xi.candidate.Digest == css.primaryDigest {
			return -1
		}
		if xj.Candidate.Digest == css.primaryDigest {
		if xj.candidate.Digest == css.primaryDigest {
			return 1
		}
		if css.uncompressedDigest != "" {
			if xi.Candidate.Digest == css.uncompressedDigest {
			if xi.candidate.Digest == css.uncompressedDigest {
				return 1
			}
			if xj.Candidate.Digest == css.uncompressedDigest {
			if xj.candidate.Digest == css.uncompressedDigest {
				return -1
			}
		}
	} else { // xi.Candidate.Digest == xj.Candidate.Digest
		// The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
		if xi.Candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.Candidate.Digest == css.uncompressedDigest) {
			return -xi.LastSeen.Compare(xj.LastSeen)
		if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) {
			return -xi.lastSeen.Compare(xj.lastSeen)
		}
	}

	// Neither of the digests are primaryDigest/uncompressedDigest:
	if cmp := xi.LastSeen.Compare(xj.LastSeen); cmp != 0 { // Order primarily by time
	if cmp := xi.lastSeen.Compare(xj.lastSeen); cmp != 0 { // Order primarily by time
		return -cmp
	}
	// Fall back to digest, if timestamps end up _exactly_ the same (how?!)
	return cmp.Compare(xi.Candidate.Digest, xj.Candidate.Digest)
	return cmp.Compare(xi.candidate.Digest, xj.candidate.Digest)
}
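
The comparison is written for slices.SortFunc; a short sketch (mirroring the call in destructivelyPrioritizeReplacementCandidatesWithMax below) of the resulting order:

// Given distinct digests, candidates with css.primaryDigest sort first and
// candidates with css.uncompressedDigest sort last; within one digest, and
// among all remaining digests, more recently seen candidates sort earlier.
slices.SortFunc(cs, (&candidateSortState{
	primaryDigest:      primaryDigest,
	uncompressedDigest: uncompressedDigest,
}).compare)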

// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the
@@ -138,7 +214,7 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
		uncompressedDigest: uncompressedDigest,
	}).compare)
	for _, candidate := range cs {
		if candidate.Candidate.UnknownLocation {
		if candidate.candidate.UnknownLocation {
			unknownLocationCandidates = append(unknownLocationCandidates, candidate)
		} else {
			knownLocationCandidates = append(knownLocationCandidates, candidate)
@@ -150,11 +226,11 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
	unknownLocationCandidatesUsed := min(noLocationLimit, remainingCapacity, len(unknownLocationCandidates))
	res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed)
	for i := 0; i < knownLocationCandidatesUsed; i++ {
		res[i] = knownLocationCandidates[i].Candidate
		res[i] = knownLocationCandidates[i].candidate
	}
	// If candidates with unknown location are found, lets add them to final list
	for i := 0; i < unknownLocationCandidatesUsed; i++ {
		res = append(res, unknownLocationCandidates[i].Candidate)
		res = append(res, unknownLocationCandidates[i].candidate)
	}
	return res
}

114
vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
generated
vendored
@@ -24,10 +24,11 @@ type locationKey struct {
type cache struct {
	mutex sync.Mutex
	// The following fields can only be accessed with mutex held.
	uncompressedDigests map[digest.Digest]digest.Digest
	digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest
	knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
	compressors map[digest.Digest]string // stores a compressor name, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression), for each digest
	uncompressedDigests map[digest.Digest]digest.Digest
	uncompressedDigestsByTOC map[digest.Digest]digest.Digest
	digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest] // stores a set of digests for each uncompressed digest
	knownLocations map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
	compressors map[digest.Digest]blobinfocache.DigestCompressorData // stores compression data for each digest; BaseVariantCompressor != UnknownCompression
}

// New returns a BlobInfoCache implementation which is in-memory only.
@@ -44,10 +45,11 @@ func New() types.BlobInfoCache {

func new2() *cache {
	return &cache{
		uncompressedDigests: map[digest.Digest]digest.Digest{},
		digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{},
		knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
		compressors: map[digest.Digest]string{},
		uncompressedDigests: map[digest.Digest]digest.Digest{},
		uncompressedDigestsByTOC: map[digest.Digest]digest.Digest{},
		digestsByUncompressed: map[digest.Digest]*set.Set[digest.Digest]{},
		knownLocations: map[locationKey]map[types.BICLocationReference]time.Time{},
		compressors: map[digest.Digest]blobinfocache.DigestCompressorData{},
	}
}

@@ -104,6 +106,30 @@ func (mem *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre
	anyDigestSet.Add(anyDigest)
}

// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest.
// Returns "" if the uncompressed digest is unknown.
func (mem *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
	mem.mutex.Lock()
	defer mem.mutex.Unlock()
	if d, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok {
		return d
	}
	return ""
}

// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (mem *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
	mem.mutex.Lock()
	defer mem.mutex.Unlock()
	if previous, ok := mem.uncompressedDigestsByTOC[tocDigest]; ok && previous != uncompressed {
		logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed)
	}
	mem.uncompressedDigestsByTOC[tocDigest] = uncompressed
}
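
A small usage sketch (editorial; the digest values are assumed, and cache2 stands for the internal BlobInfoCache2 view of a cache, e.g. as obtained via blobinfocache.FromBlobInfoCache):

// Record the pair only after locally verifying both digests:
cache2.RecordTOCUncompressedPair(tocDigest, diffID)
// A later partial pull can then resolve the TOC back to the DiffID:
if d := cache2.UncompressedDigestForTOC(tocDigest); d != "" {
	// d is the cached uncompressed (“DiffID”) digest for this TOC.
}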

// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
@@ -118,19 +144,40 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type
	locationScope[location] = time.Now() // Possibly overwriting an older entry.
}

// RecordDigestCompressorName records that the blob with the specified digest is either compressed with the specified
// algorithm, or uncompressed, or that we no longer know.
func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) {
// RecordDigestCompressorData records data for the blob with the specified digest.
// WARNING: Only call this with LOCALLY VERIFIED data:
// - don’t record a compressor for a digest just because some remote author claims so
//   (e.g. because a manifest says so);
// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
//   and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
//   in a manifest)
//
// otherwise the cache could be poisoned and cause us to make incorrect edits to type
// information in a manifest.
func (mem *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) {
	mem.mutex.Lock()
	defer mem.mutex.Unlock()
	if previous, ok := mem.compressors[blobDigest]; ok && previous != compressorName {
		logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", blobDigest, previous, compressorName)
	if previous, ok := mem.compressors[anyDigest]; ok {
		if previous.BaseVariantCompressor != data.BaseVariantCompressor {
			logrus.Warnf("Base compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.BaseVariantCompressor, data.BaseVariantCompressor)
		} else if previous.SpecificVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor != blobinfocache.UnknownCompression &&
			previous.SpecificVariantCompressor != data.SpecificVariantCompressor {
			logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous.SpecificVariantCompressor, data.SpecificVariantCompressor)
		}
		// We don’t check SpecificVariantAnnotations for equality, it’s possible that their generation is not deterministic.

		// Preserve specific variant information if the incoming data does not have it.
		if data.BaseVariantCompressor != blobinfocache.UnknownCompression && data.SpecificVariantCompressor == blobinfocache.UnknownCompression &&
			previous.SpecificVariantCompressor != blobinfocache.UnknownCompression {
			data.SpecificVariantCompressor = previous.SpecificVariantCompressor
			data.SpecificVariantAnnotations = previous.SpecificVariantAnnotations
		}
	}
	if compressorName == blobinfocache.UnknownCompression {
		delete(mem.compressors, blobDigest)
	if data.BaseVariantCompressor == blobinfocache.UnknownCompression {
		delete(mem.compressors, anyDigest)
		return
	}
	mem.compressors[blobDigest] = compressorName
	mem.compressors[anyDigest] = data
}
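
A worked example of the merge logic above (editorial; the compressor names and annotations are illustrative): a later record that only knows the base variant no longer erases previously recorded specific-variant data:

mem.RecordDigestCompressorData(d, blobinfocache.DigestCompressorData{
	BaseVariantCompressor:      "zstd",
	SpecificVariantCompressor:  "zstd:chunked",
	SpecificVariantAnnotations: tocAnnotations,
})
mem.RecordDigestCompressorData(d, blobinfocache.DigestCompressorData{
	BaseVariantCompressor:     "zstd",                           // base variant known…
	SpecificVariantCompressor: blobinfocache.UnknownCompression, // …specific variant not
})
// The cache still reports SpecificVariantCompressor == "zstd:chunked"
// (with the original annotations) for d.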

// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory
@@ -140,38 +187,25 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso
// with unknown compression.
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
	v2Options *blobinfocache.CandidateLocations2Options) []prioritize.CandidateWithTime {
	compressorName := blobinfocache.UnknownCompression
	if v, ok := mem.compressors[digest]; ok {
		compressorName = v
	compressionData := blobinfocache.DigestCompressorData{
		BaseVariantCompressor: blobinfocache.UnknownCompression,
		SpecificVariantCompressor: blobinfocache.UnknownCompression,
		SpecificVariantAnnotations: nil,
	}
	ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName)
	if !ok {
	if v, ok := mem.compressors[digest]; ok {
		compressionData = v
	}
	template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData)
	if template == nil {
		return candidates
	}
	locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
	if len(locations) > 0 {
		for l, t := range locations {
			candidates = append(candidates, prioritize.CandidateWithTime{
				Candidate: blobinfocache.BICReplacementCandidate2{
					Digest: digest,
					CompressionOperation: compressionOp,
					CompressionAlgorithm: compressionAlgo,
					Location: l,
				},
				LastSeen: t,
			})
			candidates = append(candidates, template.CandidateWithLocation(l, t))
		}
	} else if v2Options != nil {
		candidates = append(candidates, prioritize.CandidateWithTime{
			Candidate: blobinfocache.BICReplacementCandidate2{
				Digest: digest,
				CompressionOperation: compressionOp,
				CompressionAlgorithm: compressionAlgo,
				UnknownLocation: true,
				Location: types.BICLocationReference{Opaque: ""},
			},
			LastSeen: time.Time{},
		})
		candidates = append(candidates, template.CandidateWithUnknownLocation())
	}
	return candidates
}

13
vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go
generated
vendored
@@ -34,6 +34,19 @@ func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
}

// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest.
// Returns "" if the uncompressed digest is unknown.
func (noCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
	return ""
}

// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (noCache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
}

// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {

230
vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
generated
vendored
@@ -3,6 +3,7 @@ package sqlite

import (
	"database/sql"
	"encoding/json"
	"errors"
	"fmt"
	"sync"
@@ -295,6 +296,24 @@ func ensureDBHasCurrentSchema(db *sql.DB) error {
			`PRIMARY KEY (transport, scope, digest, location)
			)`,
	},
	{
		"DigestTOCUncompressedPairs",
		`CREATE TABLE IF NOT EXISTS DigestTOCUncompressedPairs(` +
			// index implied by PRIMARY KEY
			`tocDigest TEXT PRIMARY KEY NOT NULL,` +
			`uncompressedDigest TEXT NOT NULL
			)`,
	},
	{
		"DigestSpecificVariantCompressors", // If changing the schema incompatibly, merge this with DigestCompressors.
		`CREATE TABLE IF NOT EXISTS DigestSpecificVariantCompressors(` +
			// index implied by PRIMARY KEY
			`digest TEXT PRIMARY KEY NOT NULL,` +
			// The compressor is not `UnknownCompression`.
			`specificVariantCompressor TEXT NOT NULL,
			specificVariantAnnotations BLOB NOT NULL
			)`,
	},
}

_, err := dbTransaction(db, func(tx *sql.Tx) (void, error) {
@@ -385,6 +404,57 @@ func (sqc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompre
	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}

// UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest.
// Returns "" if the uncompressed digest is unknown.
func (sqc *cache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
	res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) {
		uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?", tocDigest.String())
		if err != nil {
			return "", err
		}
		if found {
			d, err := digest.Parse(uncompressedString)
			if err != nil {
				return "", err
			}
			return d, nil

		}
		return "", nil
	})
	if err != nil {
		return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
	}
	return res
}

// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (sqc *cache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
		previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestTOCUncompressedPairs WHERE tocDigest = ?", tocDigest.String())
		if err != nil {
			return void{}, fmt.Errorf("looking for uncompressed digest for blob with TOC %q", tocDigest)
		}
		if gotPrevious {
			previous, err := digest.Parse(previousString)
			if err != nil {
				return void{}, err
			}
			if previous != uncompressed {
				logrus.Warnf("Uncompressed digest for blob with TOC %q previously recorded as %q, now %q", tocDigest, previous, uncompressed)
			}
		}
		if _, err := tx.Exec("INSERT OR REPLACE INTO DigestTOCUncompressedPairs(tocDigest, uncompressedDigest) VALUES (?, ?)",
			tocDigest.String(), uncompressed.String()); err != nil {
			return void{}, fmt.Errorf("recording uncompressed digest %q for blob with TOC %q: %w", uncompressed, tocDigest, err)
		}
		return void{}, nil
	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}

// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, location types.BICLocationReference) {
@@ -398,29 +468,58 @@ func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope type
	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}

// RecordDigestCompressorName records a compressor for the blob with the specified digest,
// or Uncompressed or UnknownCompression.
// WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a
// digest just because some remote author claims so (e.g. because a manifest says so);
// RecordDigestCompressorData records data for the blob with the specified digest.
// WARNING: Only call this with LOCALLY VERIFIED data:
// - don’t record a compressor for a digest just because some remote author claims so
//   (e.g. because a manifest says so);
// - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
//   and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
//   in a manifest)
//
// otherwise the cache could be poisoned and cause us to make incorrect edits to type
// information in a manifest.
func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
func (sqc *cache) RecordDigestCompressorData(anyDigest digest.Digest, data blobinfocache.DigestCompressorData) {
	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
		previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String())
		if err != nil {
			return void{}, fmt.Errorf("looking for compressor of for %q", anyDigest)
			return void{}, fmt.Errorf("looking for compressor of %q", anyDigest)
		}
		warned := false
		if gotPrevious && previous != compressorName {
			logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, compressorName)
		if gotPrevious && previous != data.BaseVariantCompressor {
			logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, data.BaseVariantCompressor)
			warned = true
		}
		if compressorName == blobinfocache.UnknownCompression {
		if data.BaseVariantCompressor == blobinfocache.UnknownCompression {
			if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil {
				return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err)
			}
			if _, err := tx.Exec("DELETE FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String()); err != nil {
				return void{}, fmt.Errorf("deleting specific variant compressor for digest %q: %w", anyDigest, err)
			}
		} else {
			if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)",
				anyDigest.String(), compressorName); err != nil {
				return void{}, fmt.Errorf("recording compressor %q for %q: %w", compressorName, anyDigest, err)
				anyDigest.String(), data.BaseVariantCompressor); err != nil {
				return void{}, fmt.Errorf("recording compressor %q for %q: %w", data.BaseVariantCompressor, anyDigest, err)
			}
		}

		if data.SpecificVariantCompressor != blobinfocache.UnknownCompression {
			if !warned { // Don’t warn twice about the same digest
				prevSVC, found, err := querySingleValue[string](tx, "SELECT specificVariantCompressor FROM DigestSpecificVariantCompressors WHERE digest = ?", anyDigest.String())
				if err != nil {
					return void{}, fmt.Errorf("looking for specific variant compressor of %q", anyDigest)
				}
				if found && data.SpecificVariantCompressor != prevSVC {
					logrus.Warnf("Specific compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, prevSVC, data.SpecificVariantCompressor)
				}
			}
			annotations, err := json.Marshal(data.SpecificVariantAnnotations)
			if err != nil {
				return void{}, err
			}
			if _, err := tx.Exec("INSERT OR REPLACE INTO DigestSpecificVariantCompressors(digest, specificVariantCompressor, specificVariantAnnotations) VALUES (?, ?, ?)",
				anyDigest.String(), data.SpecificVariantCompressor, annotations); err != nil {
				return void{}, fmt.Errorf("recording specific variant compressor %q/%q for %q: %w", data.SpecificVariantCompressor, annotations, anyDigest, err)
			}
		}
		return void{}, nil
@@ -433,18 +532,33 @@ func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressor
// with unknown compression.
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest,
	v2Options *blobinfocache.CandidateLocations2Options) ([]prioritize.CandidateWithTime, error) {
	compressorName := blobinfocache.UnknownCompression
	compressionData := blobinfocache.DigestCompressorData{
		BaseVariantCompressor: blobinfocache.UnknownCompression,
		SpecificVariantCompressor: blobinfocache.UnknownCompression,
		SpecificVariantAnnotations: nil,
	}
	if v2Options != nil {
		compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String())
		if err != nil {
			return nil, fmt.Errorf("scanning compressorName: %w", err)
		}
		if found {
			compressorName = compressor
		var baseVariantCompressor string
		var specificVariantCompressor sql.NullString
		var annotationBytes []byte
		switch err := tx.QueryRow("SELECT compressor, specificVariantCompressor, specificVariantAnnotations "+
			"FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest) WHERE digest = ?", digest.String()).
			Scan(&baseVariantCompressor, &specificVariantCompressor, &annotationBytes); {
		case errors.Is(err, sql.ErrNoRows): // Do nothing
		case err != nil:
			return nil, fmt.Errorf("scanning compressor data: %w", err)
		default:
			compressionData.BaseVariantCompressor = baseVariantCompressor
			if specificVariantCompressor.Valid && annotationBytes != nil {
				compressionData.SpecificVariantCompressor = specificVariantCompressor.String
				if err := json.Unmarshal(annotationBytes, &compressionData.SpecificVariantAnnotations); err != nil {
					return nil, err
				}
			}
		}
	}
	ok, compressionOp, compressionAlgo := prioritize.CandidateCompression(v2Options, digest, compressorName)
	if !ok {
	template := prioritize.CandidateTemplateWithCompression(v2Options, digest, compressionData)
	if template == nil {
		return candidates, nil
	}

@@ -463,15 +577,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
		if err := rows.Scan(&location, &time); err != nil {
			return nil, fmt.Errorf("scanning candidate: %w", err)
		}
		candidates = append(candidates, prioritize.CandidateWithTime{
			Candidate: blobinfocache.BICReplacementCandidate2{
				Digest: digest,
				CompressionOperation: compressionOp,
				CompressionAlgorithm: compressionAlgo,
				Location: types.BICLocationReference{Opaque: location},
			},
			LastSeen: time,
		})
		candidates = append(candidates, template.CandidateWithLocation(types.BICLocationReference{Opaque: location}, time))
		rowAdded = true
	}
	if err := rows.Err(); err != nil {
@@ -479,16 +585,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
	}

	if !rowAdded && v2Options != nil {
		candidates = append(candidates, prioritize.CandidateWithTime{
			Candidate: blobinfocache.BICReplacementCandidate2{
				Digest: digest,
				CompressionOperation: compressionOp,
				CompressionAlgorithm: compressionAlgo,
				UnknownLocation: true,
				Location: types.BICLocationReference{Opaque: ""},
			},
			LastSeen: time.Time{},
		})
		candidates = append(candidates, template.CandidateWithUnknownLocation())
	}
	return candidates, nil
}
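
A note on the read path above (editorial): the LEFT JOIN yields a row whenever DigestCompressors has one, with NULL specific-variant columns when DigestSpecificVariantCompressors has no matching row; hence the sql.NullString scan target and the nil check on the annotation bytes. Condensed:

var base string
var specific sql.NullString // NULL when no specific-variant row exists
var annotations []byte      // nil in the same case
err := tx.QueryRow(
	"SELECT compressor, specificVariantCompressor, specificVariantAnnotations "+
		"FROM DigestCompressors LEFT JOIN DigestSpecificVariantCompressors USING (digest) WHERE digest = ?",
	anyDigest.String()).Scan(&base, &specific, &annotations)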
@@ -516,40 +613,41 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
		if err != nil {
			return nil, err
		}

		// FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries.
		// (In the extreme, we could turn _everything_ this function does into a single query.
		// And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.)
		// For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations.
		rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String())
		if err != nil {
			return nil, fmt.Errorf("querying for other digests: %w", err)
		}
		defer rows.Close()
		for rows.Next() {
			var otherDigestString string
			if err := rows.Scan(&otherDigestString); err != nil {
				return nil, fmt.Errorf("scanning other digest: %w", err)
			}
			otherDigest, err := digest.Parse(otherDigestString)
		if uncompressedDigest != "" {
			// FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries.
			// (In the extreme, we could turn _everything_ this function does into a single query.
			// And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.)
			// For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations.
			rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String())
			if err != nil {
				return nil, err
				return nil, fmt.Errorf("querying for other digests: %w", err)
			}
			if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
				res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options)
			defer rows.Close()
			for rows.Next() {
				var otherDigestString string
				if err := rows.Scan(&otherDigestString); err != nil {
					return nil, fmt.Errorf("scanning other digest: %w", err)
				}
				otherDigest, err := digest.Parse(otherDigestString)
				if err != nil {
					return nil, err
				}
				if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
					res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Options)
					if err != nil {
						return nil, err
					}
				}
			}
			if err := rows.Err(); err != nil {
				return nil, fmt.Errorf("iterating through other digests: %w", err)
			}
		}
		if err := rows.Err(); err != nil {
			return nil, fmt.Errorf("iterating through other digests: %w", err)
		}

		if uncompressedDigest != primaryDigest {
			res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options)
			if err != nil {
				return nil, err
			if uncompressedDigest != primaryDigest {
				res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Options)
				if err != nil {
					return nil, err
				}
			}
		}
	}

378
vendor/github.com/containers/image/v5/storage/storage_dest.go
generated
vendored
@@ -84,18 +84,36 @@ type storageImageDestinationLockProtected struct {
	currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
	indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image

	// In general, a layer is identified either by (compressed) digest, or by TOC digest.
	// Externally, a layer is identified either by (compressed) digest, or by TOC digest
	// (and we assume the TOC digest also uniquely identifies the contents, i.e. there aren’t two
	// different formats/ways to parse a single TOC); internally, we use uncompressed digest (“DiffID”) or a TOC digest.
	// We may or may not know the relationships between these three values.
	//
	// When creating a layer, the c/storage layer metadata and image IDs must _only_ be based on trusted values
	// we have computed ourselves. (Layer reuse can then look up against such trusted values, but it might not
	// recompute those values for incomding layers — the point of the reuse is that we don’t need to consume the incoming layer.)

	// Layer identification: For a layer, at least one of indexToTOCDigest and blobDiffIDs must be available before commitLayer is called.
	// The presence of an indexToTOCDigest is what decides how the layer is identified, i.e. which fields must be trusted.
	blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
	indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest, IFF the layer was created/found/reused by TOC digest
	// recompute those values for incoming layers — the point of the reuse is that we don’t need to consume the incoming layer.)
	//
	// Layer identification: For a layer, at least one of (indexToDiffID, indexToTOCDigest, blobDiffIDs) must be available
	// before commitLayer is called.
	// The layer is identified by the first of the three fields which exists, in that order (and the value must be trusted).
	//
	// WARNING: All values in indexToDiffID, indexToTOCDigest, and blobDiffIDs are _individually_ trusted, but blobDiffIDs is more subtle.
	// The values in indexTo* are all consistent, because the code writing them processed them all at once, and consistently.
	// But it is possible for a layer’s indexToDiffID and indexToTOCDigest to be based on a TOC, without setting blobDiffIDs
	// for the compressed digest of that index, and for blobDiffIDs[compressedDigest] to be set _separately_ while processing some
	// other layer entry. In particular it is possible for indexToDiffID[index] and blobDiffIDs[compressedDigestAtIndex] to refer
	// to mismatching contents.
	// Users of these fields should use trustedLayerIdentityDataLocked, which centralizes the validity logic,
	// instead of interpreting these fields, especially blobDiffIDs, directly.
	//
	// Ideally we wouldn’t have blobDiffIDs, and we would just keep records by index, but the public API does not require the caller
	// to provide layer indices; and configs don’t have layer indices. blobDiffIDs needs to exist for those cases.
	indexToDiffID map[int]digest.Digest // Mapping from layer index to DiffID
	indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest
	blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs. CAREFUL: See the WARNING above.

	// Layer data: Before commitLayer is called, either at least one of (diffOutputs, indexToAdditionalLayer, filenames)
	// should be available; or indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer.
	// should be available; or indexToDiffID/indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer.
	// They are looked up in the order they are mentioned above.
	diffOutputs map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to a partially-pulled layer intermediate data
	indexToAdditionalLayer map[int]storage.AdditionalLayer // Mapping from layer index to their corresponding additional layer
@@ -145,9 +163,12 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
		},
		indexToStorageID: make(map[int]string),
		lockProtected: storageImageDestinationLockProtected{
			indexToAddedLayerInfo: make(map[int]addedLayerInfo),
			blobDiffIDs: make(map[digest.Digest]digest.Digest),
			indexToTOCDigest: make(map[int]digest.Digest),
			indexToAddedLayerInfo: make(map[int]addedLayerInfo),

			indexToDiffID: make(map[int]digest.Digest),
			indexToTOCDigest: make(map[int]digest.Digest),
			blobDiffIDs: make(map[digest.Digest]digest.Digest),

			diffOutputs: make(map[int]*graphdriver.DriverWithDifferOutput),
			indexToAdditionalLayer: make(map[int]storage.AdditionalLayer),
			filenames: make(map[digest.Digest]string),
@@ -323,20 +344,30 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces

	s.lock.Lock()
	if out.UncompressedDigest != "" {
		s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest
		if out.TOCDigest != "" {
			options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest)
		}
		// Don’t set indexToTOCDigest on this path:
		// - Using UncompressedDigest allows image reuse with non-partially-pulled layers, so we want to set indexToDiffID.
		// - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch.
		//   That TOC is quite unlikely to match any other TOC value.

		// The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is
		// responsible for ensuring blobDigest has been validated.
		if out.CompressedDigest != blobDigest {
			return private.UploadedBlob{}, fmt.Errorf("internal error: ApplyDiffWithDiffer returned CompressedDigest %q not matching expected %q",
				out.CompressedDigest, blobDigest)
		}
		s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
		// So, record also information about blobDigest, that might benefit reuse.
		// We trust ApplyDiffWithDiffer to validate or create both values correctly.
		s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
		options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest)
	} else {
		// Don’t identify layers by TOC if UncompressedDigest is available.
		// - Using UncompressedDigest allows image reuse with non-partially-pulled layers
		// - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch.
		//   That TOC is quite unlikely to match with any other TOC value.
		// Use diffID for layer identity if it is known.
		if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" {
			s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest
		}
		s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
	}
	s.lockProtected.diffOutputs[options.LayerIndex] = out
@@ -465,49 +496,40 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
	if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
		return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
	}
	if len(layers) > 0 {
		if size != -1 {
			s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest
			return true, private.ReusedBlob{
				Digest: blobDigest,
				Size: size,
			}, nil
		}
		if !options.CanSubstitute {
			return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", blobDigest)
		}
		s.lockProtected.blobDiffIDs[uncompressedDigest] = uncompressedDigest
		return true, private.ReusedBlob{
			Digest: uncompressedDigest,
			Size: layers[0].UncompressedSize,
		}, nil
	if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
		s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest
		return true, reused, nil
	}
	}
	}

	if options.TOCDigest != "" && options.LayerIndex != nil {
		// Check if we know which UncompressedDigest the TOC digest resolves to, and we have a match for that.
		// Prefer this over LayersByTOCDigest because we can identify the layer using UncompressedDigest, maximizing reuse.
		uncompressedDigest := options.Cache.UncompressedDigestForTOC(options.TOCDigest)
		if uncompressedDigest != "" {
			layers, err = s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
			if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
				return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
			}
			if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
				s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest
				reused.MatchedByTOCDigest = true
				return true, reused, nil
			}
		}
		// Check if we have a chunked layer in storage with the same TOC digest.
		layers, err := s.imageRef.transport.store.LayersByTOCDigest(options.TOCDigest)

		if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
			return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, options.TOCDigest, err)
		}
		if len(layers) > 0 {
			if size != -1 {
				s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
				return true, private.ReusedBlob{
					Digest: blobDigest,
					Size: size,
					MatchedByTOCDigest: true,
				}, nil
			} else if options.CanSubstitute && layers[0].UncompressedDigest != "" {
				s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
				return true, private.ReusedBlob{
					Digest: layers[0].UncompressedDigest,
					Size: layers[0].UncompressedSize,
					MatchedByTOCDigest: true,
				}, nil
		if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
			if uncompressedDigest != "" {
				s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest
			}
			s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
			reused.MatchedByTOCDigest = true
			return true, reused, nil
		}
	}

@@ -515,49 +537,137 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
	return false, private.ReusedBlob{}, nil
}

// reusedBlobFromLayerLookup returns (true, ReusedBlob) if layers contain a usable match; or (false, ...) if not.
// The caller is still responsible for setting the layer identification fields, to allow the layer to be found again.
func reusedBlobFromLayerLookup(layers []storage.Layer, blobDigest digest.Digest, blobSize int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob) {
	if len(layers) > 0 {
		if blobSize != -1 {
			return true, private.ReusedBlob{
				Digest: blobDigest,
				Size: blobSize,
			}
		} else if options.CanSubstitute && layers[0].UncompressedDigest != "" {
			return true, private.ReusedBlob{
				Digest: layers[0].UncompressedDigest,
				Size: layers[0].UncompressedSize,
				CompressionOperation: types.Decompress,
				CompressionAlgorithm: nil,
			}
		}
	}
	return false, private.ReusedBlob{}
}
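
Both call sites follow the same shape; a condensed caller-side sketch (editorial; layers comes from one of the store lookups above, shown here for the DiffID path):

if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
	// The caller still records how the layer will be identified,
	// so that commitLayer can find it again later:
	s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest
	return true, reused, nil
}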

// trustedLayerIdentityData is a _consistent_ set of information known about a single layer.
type trustedLayerIdentityData struct {
	layerIdentifiedByTOC bool // true if we decided the layer should be identified by tocDigest, false if by diffID

	diffID digest.Digest // A digest of the uncompressed full contents of the layer, or "" if unknown; must be set if !layerIdentifiedByTOC
	tocDigest digest.Digest // A digest of the TOC digest, or "" if unknown; must be set if layerIdentifiedByTOC
	blobDigest digest.Digest // A digest of the (possibly-compressed) layer as presented, or "" if unknown/untrusted.
}

// trustedLayerIdentityDataLocked returns a _consistent_ set of information for a layer with (layerIndex, blobDigest).
// blobDigest is the (possibly-compressed) layer digest referenced in the manifest.
// It returns (trusted, true) if the layer was found, or (_, false) if insufficient data is available.
//
// The caller must hold s.lock.
func (s *storageImageDestination) trustedLayerIdentityDataLocked(layerIndex int, blobDigest digest.Digest) (trustedLayerIdentityData, bool) {
	// The decision about layerIdentifiedByTOC must be _stable_ once the data for layerIndex is set,
	// even if s.lockProtected.blobDiffIDs changes later and we can subsequently find an entry that wasn’t originally available.
	//
	// If we previously didn't have a blobDigest match and decided to use the TOC, but _later_ we happen to find
	// a blobDigest match, we might in principle want to reconsider, set layerIdentifiedByTOC to false, and use the file:
	// but the layer in question, and possibly child layers, might already have been committed to storage.
	// A late-arriving addition to s.lockProtected.blobDiffIDs would mean that we would want to set
	// new layer IDs for potentially the whole parent chain = throw away the just-created layers and create them all again.
	//
	// Such a within-image layer reuse is expected to be pretty rare; instead, ignore the unexpected file match
	// and proceed to the originally-planned TOC match.

	res := trustedLayerIdentityData{}
	diffID, layerIdentifiedByDiffID := s.lockProtected.indexToDiffID[layerIndex]
	if layerIdentifiedByDiffID {
		res.layerIdentifiedByTOC = false
		res.diffID = diffID
	}
	if tocDigest, ok := s.lockProtected.indexToTOCDigest[layerIndex]; ok {
		res.tocDigest = tocDigest
		if !layerIdentifiedByDiffID {
			res.layerIdentifiedByTOC = true
		}
	}
	if otherDiffID, ok := s.lockProtected.blobDiffIDs[blobDigest]; ok {
		if !layerIdentifiedByDiffID && !res.layerIdentifiedByTOC {
			// This is the only data we have, so it is clearly self-consistent.
			res.layerIdentifiedByTOC = false
			res.diffID = otherDiffID
			res.blobDigest = blobDigest
			layerIdentifiedByDiffID = true
		} else {
			// We have set up the layer identity without referring to blobDigest:
			// an attacker might have used a manifest with non-matching tocDigest and blobDigest.
			// But, if we know a trusted diffID value from other sources, and it matches the one for blobDigest,
			// we know blobDigest is fine as well.
			if res.diffID != "" && otherDiffID == res.diffID {
				res.blobDigest = blobDigest
			}
		}
	}
	if !layerIdentifiedByDiffID && !res.layerIdentifiedByTOC {
		return trustedLayerIdentityData{}, false // We found nothing at all
	}
	return res, true
}
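
Restating the precedence implemented above (editorial sketch): the identity source is the first of indexToDiffID, indexToTOCDigest, blobDiffIDs with an entry, and blobDiffIDs can only vouch for blobDigest after the fact when the DiffIDs agree:

trusted, ok := s.trustedLayerIdentityDataLocked(index, blobDigest)
switch {
case !ok:
	// No identity information at all for this layer.
case trusted.layerIdentifiedByTOC:
	// trusted.tocDigest is set; trusted.diffID and trusted.blobDigest may also
	// be known, but the layer identity is TOC-based.
default:
	// trusted.diffID is set and the identity is DiffID-based; trusted.blobDigest
	// is non-"" only when it is consistent with that DiffID.
}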

// computeID computes a recommended image ID based on information we have so far. If
// the manifest is not of a type that we recognize, we return an empty value, indicating
// that since we don't have a recommendation, a random ID should be used if one needs
// to be allocated.
func (s *storageImageDestination) computeID(m manifest.Manifest) string {
func (s *storageImageDestination) computeID(m manifest.Manifest) (string, error) {
	// This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock.

	layerInfos := m.LayerInfos()

	// Build the diffID list. We need the decompressed sums that we've been calculating to
	// fill in the DiffIDs. It's expected (but not enforced by us) that the number of
	// diffIDs corresponds to the number of non-EmptyLayer entries in the history.
	var diffIDs []digest.Digest
	switch m := m.(type) {
	switch m.(type) {
	case *manifest.Schema1:
		// Build a list of the diffIDs we've generated for the non-throwaway FS layers,
		// in reverse of the order in which they were originally listed.
		for i, compat := range m.ExtractedV1Compatibility {
			if compat.ThrowAway {
		// Build a list of the diffIDs we've generated for the non-throwaway FS layers
		for i, li := range layerInfos {
			if li.EmptyLayer {
				continue
			}
			blobSum := m.FSLayers[i].BlobSum
			diffID, ok := s.lockProtected.blobDiffIDs[blobSum]
			if !ok {
				// this can, in principle, legitimately happen when a layer is reused by TOC.
				logrus.Infof("error looking up diffID for layer %q", blobSum.String())
				return ""
			trusted, ok := s.trustedLayerIdentityDataLocked(i, li.Digest)
			if !ok { // We have already committed all layers if we get to this point, so the data must have been available.
				return "", fmt.Errorf("internal inconsistency: layer (%d, %q) not found", i, li.Digest)
			}
			diffIDs = append([]digest.Digest{diffID}, diffIDs...)
			if trusted.diffID == "" {
				if trusted.layerIdentifiedByTOC {
					logrus.Infof("v2s1 image uses a layer identified by TOC with unknown diffID; choosing a random image ID")
					return "", nil
				}
				return "", fmt.Errorf("internal inconsistency: layer (%d, %q) is not identified by TOC and has no diffID", i, li.Digest)
			}
			diffIDs = append(diffIDs, trusted.diffID)
		}
	case *manifest.Schema2, *manifest.OCI1:
		// We know the ID calculation doesn't actually use the diffIDs, so we don't need to populate
		// the diffID list.
	default:
		return ""
		return "", nil
	}

	// We want to use the same ID for “the same” images, but without risking unwanted sharing / malicious image corruption.
	//
	// Traditionally that means the same ~config digest, as computed by m.ImageID;
	// but if we pull a layer by TOC, we verify the layer against neither the (compressed) blob digest in the manifest,
	// but if we identify a layer by TOC, we verify the layer against neither the (compressed) blob digest in the manifest,
	// nor against the config’s RootFS.DiffIDs. We don’t really want to do either, to allow partial layer pulls where we never see
	// most of the data.
	//
	// So, if a layer is pulled by TOC (and we do validate against the TOC), the fact that we used the TOC, and the value of the TOC,
	// So, if a layer is identified by TOC (and we do validate against the TOC), the fact that we used the TOC, and the value of the TOC,
	// must enter into the image ID computation.
	// But for images where no TOC was used, continue to use IDs computed the traditional way, to maximize image reuse on upgrades,
	// and to introduce the changed behavior only when partial pulls are used.
@@ -566,28 +676,31 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string {
	// (skopeo copy --format v2s2 docker://…/zstd-chunked-image containers-storage:… ). So this is not happening only in the OCI case above.
	ordinaryImageID, err := m.ImageID(diffIDs)
	if err != nil {
		return ""
		return "", err
	}
	tocIDInput := ""
	hasLayerPulledByTOC := false
	for i := range m.LayerInfos() {
		layerValue := "" // An empty string is not a valid digest, so this is unambiguous with the TOC case.
		tocDigest, ok := s.lockProtected.indexToTOCDigest[i] // "" if not a TOC
		if ok {
	for i, li := range layerInfos {
		trusted, ok := s.trustedLayerIdentityDataLocked(i, li.Digest)
		if !ok { // We have already committed all layers if we get to this point, so the data must have been available.
			return "", fmt.Errorf("internal inconsistency: layer (%d, %q) not found", i, li.Digest)
		}
		layerValue := "" // An empty string is not a valid digest, so this is unambiguous with the TOC case.
		if trusted.layerIdentifiedByTOC {
			hasLayerPulledByTOC = true
			layerValue = tocDigest.String()
			layerValue = trusted.tocDigest.String()
		}
		tocIDInput += layerValue + "|" // "|" can not be present in a TOC digest, so this is an unambiguous separator.
	}

	if !hasLayerPulledByTOC {
		return ordinaryImageID
		return ordinaryImageID, nil
	}
	// ordinaryImageID is a digest of a config, which is a JSON value.
	// To avoid the risk of collisions, start the input with @ so that the input is not a valid JSON.
	tocImageID := digest.FromString("@With TOC:" + tocIDInput).Encoded()
	logrus.Debugf("Ordinary storage image ID %s; a layer was looked up by TOC, so using image ID %s", ordinaryImageID, tocImageID)
	return tocImageID
	return tocImageID, nil
}

// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
@@ -671,14 +784,14 @@ func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDig
	s.lock.Lock()
	defer s.lock.Unlock()

	if d, found := s.lockProtected.indexToTOCDigest[layerIndex]; found {
		return "@TOC=" + d.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
	trusted, ok := s.trustedLayerIdentityDataLocked(layerIndex, blobDigest)
	if !ok {
		return "", false
	}

	if d, found := s.lockProtected.blobDiffIDs[blobDigest]; found {
		return d.Encoded(), true // This looks like chain IDs, and it uses the traditional value.
	if trusted.layerIdentifiedByTOC {
		return "@TOC=" + trusted.tocDigest.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
	}
	return "", false
	return trusted.diffID.Encoded(), true // This looks like chain IDs, and it uses the traditional value.
}

// commitLayer commits the specified layer with the given index to the storage.
@@ -778,6 +891,16 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
	diffOutput, ok := s.lockProtected.diffOutputs[index]
	s.lock.Unlock()
	if ok {
		// If we know a trusted DiffID value (e.g. from a BlobInfoCache), set it in diffOutput.
		// That way it will be persisted in storage even if the cache is deleted; also
		// we can use the value below to avoid the untrustedUncompressedDigest logic (and notably
		// the costly commit delay until a manifest is available).
		s.lock.Lock()
		if d, ok := s.lockProtected.indexToDiffID[index]; ok {
			diffOutput.UncompressedDigest = d
		}
		s.lock.Unlock()

		var untrustedUncompressedDigest digest.Digest
		if diffOutput.UncompressedDigest == "" {
			d, err := s.untrustedLayerDiffID(index)
@@ -832,47 +955,43 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D

	// Check if we previously cached a file with that blob's contents. If we didn't,
	// then we need to read the desired contents from a layer.
	var trustedUncompressedDigest, trustedOriginalDigest digest.Digest // For storage.LayerOptions
	var filename string
	var gotFilename bool
	s.lock.Lock()
	tocDigest := s.lockProtected.indexToTOCDigest[index] // "" if not set
	optionalDiffID := s.lockProtected.blobDiffIDs[layerDigest] // "" if not set
	filename, gotFilename := s.lockProtected.filenames[layerDigest]
	trusted, ok := s.trustedLayerIdentityDataLocked(index, layerDigest)
	if ok && trusted.blobDigest != "" {
		filename, gotFilename = s.lockProtected.filenames[trusted.blobDigest]
	}
	s.lock.Unlock()
	if gotFilename && tocDigest == "" {
		// If tocDigest != "", if we now happen to find a layerDigest match, the newLayerID has already been computed as TOC-based,
		// and we don't know the relationship of the layerDigest and TOC digest.
		// We could recompute newLayerID to be DiffID-based and use the file, but such a within-image layer
		// reuse is expected to be pretty rare; instead, ignore the unexpected file match and proceed to the
		// originally-planned TOC match.

		// Because tocDigest == "", optionaldiffID must have been set; and even if it weren’t, PutLayer will recompute the digest from the stream.
		trustedUncompressedDigest = optionalDiffID
		trustedOriginalDigest = layerDigest // The code setting .filenames[layerDigest] is responsible for the contents matching.
	if !ok { // We have already determined newLayerID, so the data must have been available.
		return nil, fmt.Errorf("internal inconsistency: layer (%d, %q) not found", index, layerDigest)
	}
	var trustedOriginalDigest digest.Digest // For storage.LayerOptions
	if gotFilename {
		// The code setting .filenames[trusted.blobDigest] is responsible for ensuring that the file contents match trusted.blobDigest.
		trustedOriginalDigest = trusted.blobDigest
	} else {
		// Try to find the layer with contents matching the data we use.
		var layer *storage.Layer // = nil
		if tocDigest != "" {
			layers, err2 := s.imageRef.transport.store.LayersByTOCDigest(tocDigest)
			if err2 == nil && len(layers) > 0 {
		if trusted.diffID != "" {
			if layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(trusted.diffID); err2 == nil && len(layers) > 0 {
				layer = &layers[0]
			} else {
				return nil, fmt.Errorf("locating layer for TOC digest %q: %w", tocDigest, err2)
			}
		} else {
			// Because tocDigest == "", optionaldiffID must have been set
			layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(optionalDiffID)
			if err2 == nil && len(layers) > 0 {
				layer = &layers[0]
			} else {
				layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(layerDigest)
				if err2 == nil && len(layers) > 0 {
					layer = &layers[0]
				}
			}
			if layer == nil {
				return nil, fmt.Errorf("locating layer for blob %q: %w", layerDigest, err2)
			}
		}
		if layer == nil && trusted.tocDigest != "" {
			if layers, err2 := s.imageRef.transport.store.LayersByTOCDigest(trusted.tocDigest); err2 == nil && len(layers) > 0 {
				layer = &layers[0]
			}
		}
		if layer == nil && trusted.blobDigest != "" {
			if layers, err2 := s.imageRef.transport.store.LayersByCompressedDigest(trusted.blobDigest); err2 == nil && len(layers) > 0 {
				layer = &layers[0]
			}
		}
		if layer == nil {
			return nil, fmt.Errorf("layer for blob %q/%q/%q not found", trusted.blobDigest, trusted.tocDigest, trusted.diffID)
		}

		// Read the layer's contents.
		noCompression := archive.Uncompressed
		diffOptions := &storage.DiffOptions{
@@ -880,7 +999,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
		}
		diff, err2 := s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
		if err2 != nil {
			return nil, fmt.Errorf("reading layer %q for blob %q: %w", layer.ID, layerDigest, err2)
			return nil, fmt.Errorf("reading layer %q for blob %q/%q/%q: %w", layer.ID, trusted.blobDigest, trusted.tocDigest, trusted.diffID, err2)
		}
		// Copy the layer diff to a file. Diff() takes a lock that it holds
		// until the ReadCloser that it returns is closed, and PutLayer() wants
@@ -902,20 +1021,19 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
			return nil, fmt.Errorf("storing blob to file %q: %w", filename, err)
		}

		if optionalDiffID == "" && layer.UncompressedDigest != "" {
			optionalDiffID = layer.UncompressedDigest
		if trusted.diffID == "" && layer.UncompressedDigest != "" {
			trusted.diffID = layer.UncompressedDigest // This data might have been unavailable in tryReusingBlobAsPending, and is only known now.
		}
		// The stream we have is uncompressed, this matches contents of the stream.
		// If tocDigest != "", trustedUncompressedDigest might still be ""; in that case PutLayer will compute the value from the stream.
		trustedUncompressedDigest = optionalDiffID
		// FIXME? trustedOriginalDigest could be set to layerDigest IF tocDigest == "" (otherwise layerDigest is untrusted).
		// The stream we have is uncompressed, and it matches trusted.diffID (if known).
		//
		// FIXME? trustedOriginalDigest could be set to trusted.blobDigest if known, to allow more layer reuse.
		// But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created
		// layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream).
		//
		// We can legitimately set storage.LayerOptions.OriginalDigest to "",
		// but that would just result in PutLayer computing the digest of the input stream == optionalDiffID.
		// but that would just result in PutLayer computing the digest of the input stream == trusted.diffID.
		// So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
		trustedOriginalDigest = optionalDiffID
		trustedOriginalDigest = trusted.diffID

		// Allow using the already-collected layer contents without extracting the layer again.
|
||||
//
|
||||
@ -923,11 +1041,11 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
|
||||
// We don’t have the original compressed data here to trivially set filenames[layerDigest].
|
||||
// In particular we can’t achieve the correct Layer.CompressedSize value with the current c/storage API.
|
||||
// Within-image layer reuse is probably very rare, for now we prefer to avoid that complexity.
|
||||
if trustedUncompressedDigest != "" {
|
||||
if trusted.diffID != "" {
|
||||
s.lock.Lock()
|
||||
s.lockProtected.blobDiffIDs[trustedUncompressedDigest] = trustedUncompressedDigest
|
||||
s.lockProtected.filenames[trustedUncompressedDigest] = filename
|
||||
s.lockProtected.fileSizes[trustedUncompressedDigest] = fileSize
|
||||
s.lockProtected.blobDiffIDs[trusted.diffID] = trusted.diffID
|
||||
s.lockProtected.filenames[trusted.diffID] = filename
|
||||
s.lockProtected.fileSizes[trusted.diffID] = fileSize
|
||||
s.lock.Unlock()
|
||||
}
|
||||
}
|
||||
@ -940,11 +1058,12 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
|
||||
// Build the new layer using the diff, regardless of where it came from.
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
|
||||
layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{
|
||||
OriginalDigest: trustedOriginalDigest,
|
||||
UncompressedDigest: trustedUncompressedDigest,
|
||||
OriginalDigest: trustedOriginalDigest,
|
||||
// This might be "" if trusted.layerIdentifiedByTOC; in that case PutLayer will compute the value from the stream.
|
||||
UncompressedDigest: trusted.diffID,
|
||||
}, file)
|
||||
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
|
||||
return nil, fmt.Errorf("adding layer with blob %q: %w", layerDigest, err)
|
||||
return nil, fmt.Errorf("adding layer with blob %q/%q/%q: %w", trusted.blobDigest, trusted.tocDigest, trusted.diffID, err)
|
||||
}
|
||||
return layer, nil
|
||||
}
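The hunks above replace several loosely coupled per-digest maps (indexToTOCDigest, blobDiffIDs) with a single trustedLayerIdentityDataLocked lookup, and reuse an existing layer by trying the strongest identity first. Below is a minimal standalone sketch of that lookup precedence against the c/storage API; the trustedIdentity struct and findReusableLayer helper are illustrative names of ours, not part of c/image or c/storage:

package layerreuse

import (
	"fmt"

	"github.com/containers/storage"
	"github.com/opencontainers/go-digest"
)

// trustedIdentity mirrors the shape of the diff's trusted layer identity data:
// any of the three digests may be unknown ("").
type trustedIdentity struct {
	diffID     digest.Digest // digest of the uncompressed tarball
	tocDigest  digest.Digest // digest of a partial-pull (e.g. zstd:chunked) TOC
	blobDigest digest.Digest // digest of the compressed blob
}

// findReusableLayer tries the lookups in the same order as the new code:
// a DiffID match first, then a TOC match, then a compressed-digest match.
func findReusableLayer(store storage.Store, trusted trustedIdentity) (*storage.Layer, error) {
	var layer *storage.Layer
	if trusted.diffID != "" {
		if layers, err := store.LayersByUncompressedDigest(trusted.diffID); err == nil && len(layers) > 0 {
			layer = &layers[0]
		}
	}
	if layer == nil && trusted.tocDigest != "" {
		if layers, err := store.LayersByTOCDigest(trusted.tocDigest); err == nil && len(layers) > 0 {
			layer = &layers[0]
		}
	}
	if layer == nil && trusted.blobDigest != "" {
		if layers, err := store.LayersByCompressedDigest(trusted.blobDigest); err == nil && len(layers) > 0 {
			layer = &layers[0]
		}
	}
	if layer == nil {
		return nil, fmt.Errorf("layer for blob %q/%q/%q not found", trusted.blobDigest, trusted.tocDigest, trusted.diffID)
	}
	return layer, nil
}

Because the three digests travel together, the error messages in the real code can now report the full identity triple instead of only the (possibly untrusted) compressed digest.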
@@ -1155,7 +1274,10 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
// Create the image record, pointing to the most-recently added layer.
intendedID := s.imageRef.id
if intendedID == "" {
intendedID = s.computeID(man)
intendedID, err = s.computeID(man)
if err != nil {
return err
}
}
oldNames := []string{}
img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options)
4
vendor/github.com/containers/image/v5/version/version.go
generated
vendored
@@ -6,9 +6,9 @@ const (
// VersionMajor is for API-incompatible changes
VersionMajor = 5
// VersionMinor is for functionality added in a backwards-compatible manner
VersionMinor = 32
VersionMinor = 33
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
VersionPatch = 1

// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
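For context on the constant bump above: c/image exposes these constants through a single version string. A sketch of how the file composes it, assuming the fmt.Sprintf pattern used by upstream's version.go (trimmed to the relevant lines):

package version

import "fmt"

const (
	VersionMajor = 5
	VersionMinor = 33
	VersionPatch = 1
	VersionDev   = "" // non-empty only on development branches
)

// Version is the reported library version, "5.33.1" for the values above.
var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)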
323
vendor/github.com/mattn/go-runewidth/runewidth_table.go
generated
vendored
@@ -4,20 +4,21 @@ package runewidth

var combining = table{
{0x0300, 0x036F}, {0x0483, 0x0489}, {0x07EB, 0x07F3},
{0x0C00, 0x0C00}, {0x0C04, 0x0C04}, {0x0D00, 0x0D01},
{0x135D, 0x135F}, {0x1A7F, 0x1A7F}, {0x1AB0, 0x1AC0},
{0x1B6B, 0x1B73}, {0x1DC0, 0x1DF9}, {0x1DFB, 0x1DFF},
{0x0C00, 0x0C00}, {0x0C04, 0x0C04}, {0x0CF3, 0x0CF3},
{0x0D00, 0x0D01}, {0x135D, 0x135F}, {0x1A7F, 0x1A7F},
{0x1AB0, 0x1ACE}, {0x1B6B, 0x1B73}, {0x1DC0, 0x1DFF},
{0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2DE0, 0x2DFF},
{0x3099, 0x309A}, {0xA66F, 0xA672}, {0xA674, 0xA67D},
{0xA69E, 0xA69F}, {0xA6F0, 0xA6F1}, {0xA8E0, 0xA8F1},
{0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, {0x10376, 0x1037A},
{0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x11300, 0x11301},
{0x1133B, 0x1133C}, {0x11366, 0x1136C}, {0x11370, 0x11374},
{0x16AF0, 0x16AF4}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172},
{0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x10F82, 0x10F85},
{0x11300, 0x11301}, {0x1133B, 0x1133C}, {0x11366, 0x1136C},
{0x11370, 0x11374}, {0x16AF0, 0x16AF4}, {0x1CF00, 0x1CF2D},
{0x1CF30, 0x1CF46}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172},
{0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD},
{0x1D242, 0x1D244}, {0x1E000, 0x1E006}, {0x1E008, 0x1E018},
{0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, {0x1E026, 0x1E02A},
{0x1E8D0, 0x1E8D6},
{0x1E08F, 0x1E08F}, {0x1E8D0, 0x1E8D6},
}

var doublewidth = table{
@@ -33,33 +34,34 @@ var doublewidth = table{
{0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797},
{0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C},
{0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99},
{0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB},
{0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF},
{0x3105, 0x312F}, {0x3131, 0x318E}, {0x3190, 0x31E3},
{0x31F0, 0x321E}, {0x3220, 0x3247}, {0x3250, 0x4DBF},
{0x4E00, 0xA48C}, {0xA490, 0xA4C6}, {0xA960, 0xA97C},
{0xAC00, 0xD7A3}, {0xF900, 0xFAFF}, {0xFE10, 0xFE19},
{0xFE30, 0xFE52}, {0xFE54, 0xFE66}, {0xFE68, 0xFE6B},
{0xFF01, 0xFF60}, {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE4},
{0x16FF0, 0x16FF1}, {0x17000, 0x187F7}, {0x18800, 0x18CD5},
{0x18D00, 0x18D08}, {0x1B000, 0x1B11E}, {0x1B150, 0x1B152},
{0x1B164, 0x1B167}, {0x1B170, 0x1B2FB}, {0x1F004, 0x1F004},
{0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A},
{0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248},
{0x1F250, 0x1F251}, {0x1F260, 0x1F265}, {0x1F300, 0x1F320},
{0x1F32D, 0x1F335}, {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393},
{0x1F3A0, 0x1F3CA}, {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0},
{0x1F3F4, 0x1F3F4}, {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440},
{0x1F442, 0x1F4FC}, {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E},
{0x1F550, 0x1F567}, {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596},
{0x1F5A4, 0x1F5A4}, {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5},
{0x1F6CC, 0x1F6CC}, {0x1F6D0, 0x1F6D2}, {0x1F6D5, 0x1F6D7},
{0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6FC}, {0x1F7E0, 0x1F7EB},
{0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1F978},
{0x1F97A, 0x1F9CB}, {0x1F9CD, 0x1F9FF}, {0x1FA70, 0x1FA74},
{0x1FA78, 0x1FA7A}, {0x1FA80, 0x1FA86}, {0x1FA90, 0x1FAA8},
{0x1FAB0, 0x1FAB6}, {0x1FAC0, 0x1FAC2}, {0x1FAD0, 0x1FAD6},
{0x20000, 0x2FFFD}, {0x30000, 0x3FFFD},
{0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x303E},
{0x3041, 0x3096}, {0x3099, 0x30FF}, {0x3105, 0x312F},
{0x3131, 0x318E}, {0x3190, 0x31E3}, {0x31EF, 0x321E},
{0x3220, 0x3247}, {0x3250, 0x4DBF}, {0x4E00, 0xA48C},
{0xA490, 0xA4C6}, {0xA960, 0xA97C}, {0xAC00, 0xD7A3},
{0xF900, 0xFAFF}, {0xFE10, 0xFE19}, {0xFE30, 0xFE52},
{0xFE54, 0xFE66}, {0xFE68, 0xFE6B}, {0xFF01, 0xFF60},
{0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE4}, {0x16FF0, 0x16FF1},
{0x17000, 0x187F7}, {0x18800, 0x18CD5}, {0x18D00, 0x18D08},
{0x1AFF0, 0x1AFF3}, {0x1AFF5, 0x1AFFB}, {0x1AFFD, 0x1AFFE},
{0x1B000, 0x1B122}, {0x1B132, 0x1B132}, {0x1B150, 0x1B152},
{0x1B155, 0x1B155}, {0x1B164, 0x1B167}, {0x1B170, 0x1B2FB},
{0x1F004, 0x1F004}, {0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E},
{0x1F191, 0x1F19A}, {0x1F200, 0x1F202}, {0x1F210, 0x1F23B},
{0x1F240, 0x1F248}, {0x1F250, 0x1F251}, {0x1F260, 0x1F265},
{0x1F300, 0x1F320}, {0x1F32D, 0x1F335}, {0x1F337, 0x1F37C},
{0x1F37E, 0x1F393}, {0x1F3A0, 0x1F3CA}, {0x1F3CF, 0x1F3D3},
{0x1F3E0, 0x1F3F0}, {0x1F3F4, 0x1F3F4}, {0x1F3F8, 0x1F43E},
{0x1F440, 0x1F440}, {0x1F442, 0x1F4FC}, {0x1F4FF, 0x1F53D},
{0x1F54B, 0x1F54E}, {0x1F550, 0x1F567}, {0x1F57A, 0x1F57A},
{0x1F595, 0x1F596}, {0x1F5A4, 0x1F5A4}, {0x1F5FB, 0x1F64F},
{0x1F680, 0x1F6C5}, {0x1F6CC, 0x1F6CC}, {0x1F6D0, 0x1F6D2},
{0x1F6D5, 0x1F6D7}, {0x1F6DC, 0x1F6DF}, {0x1F6EB, 0x1F6EC},
{0x1F6F4, 0x1F6FC}, {0x1F7E0, 0x1F7EB}, {0x1F7F0, 0x1F7F0},
{0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1F9FF},
{0x1FA70, 0x1FA7C}, {0x1FA80, 0x1FA88}, {0x1FA90, 0x1FABD},
{0x1FABF, 0x1FAC5}, {0x1FACE, 0x1FADB}, {0x1FAE0, 0x1FAE8},
{0x1FAF0, 0x1FAF8}, {0x20000, 0x2FFFD}, {0x30000, 0x3FFFD},
}

var ambiguous = table{
@@ -154,43 +156,43 @@ var neutral = table{
{0x0402, 0x040F}, {0x0450, 0x0450}, {0x0452, 0x052F},
{0x0531, 0x0556}, {0x0559, 0x058A}, {0x058D, 0x058F},
{0x0591, 0x05C7}, {0x05D0, 0x05EA}, {0x05EF, 0x05F4},
{0x0600, 0x061C}, {0x061E, 0x070D}, {0x070F, 0x074A},
{0x074D, 0x07B1}, {0x07C0, 0x07FA}, {0x07FD, 0x082D},
{0x0830, 0x083E}, {0x0840, 0x085B}, {0x085E, 0x085E},
{0x0860, 0x086A}, {0x08A0, 0x08B4}, {0x08B6, 0x08C7},
{0x08D3, 0x0983}, {0x0985, 0x098C}, {0x098F, 0x0990},
{0x0993, 0x09A8}, {0x09AA, 0x09B0}, {0x09B2, 0x09B2},
{0x09B6, 0x09B9}, {0x09BC, 0x09C4}, {0x09C7, 0x09C8},
{0x09CB, 0x09CE}, {0x09D7, 0x09D7}, {0x09DC, 0x09DD},
{0x09DF, 0x09E3}, {0x09E6, 0x09FE}, {0x0A01, 0x0A03},
{0x0A05, 0x0A0A}, {0x0A0F, 0x0A10}, {0x0A13, 0x0A28},
{0x0A2A, 0x0A30}, {0x0A32, 0x0A33}, {0x0A35, 0x0A36},
{0x0A38, 0x0A39}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42},
{0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51},
{0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E}, {0x0A66, 0x0A76},
{0x0A81, 0x0A83}, {0x0A85, 0x0A8D}, {0x0A8F, 0x0A91},
{0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, {0x0AB2, 0x0AB3},
{0x0AB5, 0x0AB9}, {0x0ABC, 0x0AC5}, {0x0AC7, 0x0AC9},
{0x0ACB, 0x0ACD}, {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE3},
{0x0AE6, 0x0AF1}, {0x0AF9, 0x0AFF}, {0x0B01, 0x0B03},
{0x0B05, 0x0B0C}, {0x0B0F, 0x0B10}, {0x0B13, 0x0B28},
{0x0B2A, 0x0B30}, {0x0B32, 0x0B33}, {0x0B35, 0x0B39},
{0x0B3C, 0x0B44}, {0x0B47, 0x0B48}, {0x0B4B, 0x0B4D},
{0x0B55, 0x0B57}, {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B63},
{0x0B66, 0x0B77}, {0x0B82, 0x0B83}, {0x0B85, 0x0B8A},
{0x0B8E, 0x0B90}, {0x0B92, 0x0B95}, {0x0B99, 0x0B9A},
{0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F}, {0x0BA3, 0x0BA4},
{0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9}, {0x0BBE, 0x0BC2},
{0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCD}, {0x0BD0, 0x0BD0},
{0x0BD7, 0x0BD7}, {0x0BE6, 0x0BFA}, {0x0C00, 0x0C0C},
{0x0C0E, 0x0C10}, {0x0C12, 0x0C28}, {0x0C2A, 0x0C39},
{0x0C3D, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D},
{0x0C55, 0x0C56}, {0x0C58, 0x0C5A}, {0x0C60, 0x0C63},
{0x0600, 0x070D}, {0x070F, 0x074A}, {0x074D, 0x07B1},
{0x07C0, 0x07FA}, {0x07FD, 0x082D}, {0x0830, 0x083E},
{0x0840, 0x085B}, {0x085E, 0x085E}, {0x0860, 0x086A},
{0x0870, 0x088E}, {0x0890, 0x0891}, {0x0898, 0x0983},
{0x0985, 0x098C}, {0x098F, 0x0990}, {0x0993, 0x09A8},
{0x09AA, 0x09B0}, {0x09B2, 0x09B2}, {0x09B6, 0x09B9},
{0x09BC, 0x09C4}, {0x09C7, 0x09C8}, {0x09CB, 0x09CE},
{0x09D7, 0x09D7}, {0x09DC, 0x09DD}, {0x09DF, 0x09E3},
{0x09E6, 0x09FE}, {0x0A01, 0x0A03}, {0x0A05, 0x0A0A},
{0x0A0F, 0x0A10}, {0x0A13, 0x0A28}, {0x0A2A, 0x0A30},
{0x0A32, 0x0A33}, {0x0A35, 0x0A36}, {0x0A38, 0x0A39},
{0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42}, {0x0A47, 0x0A48},
{0x0A4B, 0x0A4D}, {0x0A51, 0x0A51}, {0x0A59, 0x0A5C},
{0x0A5E, 0x0A5E}, {0x0A66, 0x0A76}, {0x0A81, 0x0A83},
{0x0A85, 0x0A8D}, {0x0A8F, 0x0A91}, {0x0A93, 0x0AA8},
{0x0AAA, 0x0AB0}, {0x0AB2, 0x0AB3}, {0x0AB5, 0x0AB9},
{0x0ABC, 0x0AC5}, {0x0AC7, 0x0AC9}, {0x0ACB, 0x0ACD},
{0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE3}, {0x0AE6, 0x0AF1},
{0x0AF9, 0x0AFF}, {0x0B01, 0x0B03}, {0x0B05, 0x0B0C},
{0x0B0F, 0x0B10}, {0x0B13, 0x0B28}, {0x0B2A, 0x0B30},
{0x0B32, 0x0B33}, {0x0B35, 0x0B39}, {0x0B3C, 0x0B44},
{0x0B47, 0x0B48}, {0x0B4B, 0x0B4D}, {0x0B55, 0x0B57},
{0x0B5C, 0x0B5D}, {0x0B5F, 0x0B63}, {0x0B66, 0x0B77},
{0x0B82, 0x0B83}, {0x0B85, 0x0B8A}, {0x0B8E, 0x0B90},
{0x0B92, 0x0B95}, {0x0B99, 0x0B9A}, {0x0B9C, 0x0B9C},
{0x0B9E, 0x0B9F}, {0x0BA3, 0x0BA4}, {0x0BA8, 0x0BAA},
{0x0BAE, 0x0BB9}, {0x0BBE, 0x0BC2}, {0x0BC6, 0x0BC8},
{0x0BCA, 0x0BCD}, {0x0BD0, 0x0BD0}, {0x0BD7, 0x0BD7},
{0x0BE6, 0x0BFA}, {0x0C00, 0x0C0C}, {0x0C0E, 0x0C10},
{0x0C12, 0x0C28}, {0x0C2A, 0x0C39}, {0x0C3C, 0x0C44},
{0x0C46, 0x0C48}, {0x0C4A, 0x0C4D}, {0x0C55, 0x0C56},
{0x0C58, 0x0C5A}, {0x0C5D, 0x0C5D}, {0x0C60, 0x0C63},
{0x0C66, 0x0C6F}, {0x0C77, 0x0C8C}, {0x0C8E, 0x0C90},
{0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9},
{0x0CBC, 0x0CC4}, {0x0CC6, 0x0CC8}, {0x0CCA, 0x0CCD},
{0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE3},
{0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF2}, {0x0D00, 0x0D0C},
{0x0CD5, 0x0CD6}, {0x0CDD, 0x0CDE}, {0x0CE0, 0x0CE3},
{0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF3}, {0x0D00, 0x0D0C},
{0x0D0E, 0x0D10}, {0x0D12, 0x0D44}, {0x0D46, 0x0D48},
{0x0D4A, 0x0D4F}, {0x0D54, 0x0D63}, {0x0D66, 0x0D7F},
{0x0D81, 0x0D83}, {0x0D85, 0x0D96}, {0x0D9A, 0x0DB1},
@@ -200,7 +202,7 @@ var neutral = table{
{0x0E01, 0x0E3A}, {0x0E3F, 0x0E5B}, {0x0E81, 0x0E82},
{0x0E84, 0x0E84}, {0x0E86, 0x0E8A}, {0x0E8C, 0x0EA3},
{0x0EA5, 0x0EA5}, {0x0EA7, 0x0EBD}, {0x0EC0, 0x0EC4},
{0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9},
{0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECE}, {0x0ED0, 0x0ED9},
{0x0EDC, 0x0EDF}, {0x0F00, 0x0F47}, {0x0F49, 0x0F6C},
{0x0F71, 0x0F97}, {0x0F99, 0x0FBC}, {0x0FBE, 0x0FCC},
{0x0FCE, 0x0FDA}, {0x1000, 0x10C5}, {0x10C7, 0x10C7},
@@ -212,20 +214,19 @@ var neutral = table{
{0x12D8, 0x1310}, {0x1312, 0x1315}, {0x1318, 0x135A},
{0x135D, 0x137C}, {0x1380, 0x1399}, {0x13A0, 0x13F5},
{0x13F8, 0x13FD}, {0x1400, 0x169C}, {0x16A0, 0x16F8},
{0x1700, 0x170C}, {0x170E, 0x1714}, {0x1720, 0x1736},
{0x1740, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770},
{0x1772, 0x1773}, {0x1780, 0x17DD}, {0x17E0, 0x17E9},
{0x17F0, 0x17F9}, {0x1800, 0x180E}, {0x1810, 0x1819},
{0x1820, 0x1878}, {0x1880, 0x18AA}, {0x18B0, 0x18F5},
{0x1900, 0x191E}, {0x1920, 0x192B}, {0x1930, 0x193B},
{0x1940, 0x1940}, {0x1944, 0x196D}, {0x1970, 0x1974},
{0x1980, 0x19AB}, {0x19B0, 0x19C9}, {0x19D0, 0x19DA},
{0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E}, {0x1A60, 0x1A7C},
{0x1A7F, 0x1A89}, {0x1A90, 0x1A99}, {0x1AA0, 0x1AAD},
{0x1AB0, 0x1AC0}, {0x1B00, 0x1B4B}, {0x1B50, 0x1B7C},
{0x1B80, 0x1BF3}, {0x1BFC, 0x1C37}, {0x1C3B, 0x1C49},
{0x1C4D, 0x1C88}, {0x1C90, 0x1CBA}, {0x1CBD, 0x1CC7},
{0x1CD0, 0x1CFA}, {0x1D00, 0x1DF9}, {0x1DFB, 0x1F15},
{0x1700, 0x1715}, {0x171F, 0x1736}, {0x1740, 0x1753},
{0x1760, 0x176C}, {0x176E, 0x1770}, {0x1772, 0x1773},
{0x1780, 0x17DD}, {0x17E0, 0x17E9}, {0x17F0, 0x17F9},
{0x1800, 0x1819}, {0x1820, 0x1878}, {0x1880, 0x18AA},
{0x18B0, 0x18F5}, {0x1900, 0x191E}, {0x1920, 0x192B},
{0x1930, 0x193B}, {0x1940, 0x1940}, {0x1944, 0x196D},
{0x1970, 0x1974}, {0x1980, 0x19AB}, {0x19B0, 0x19C9},
{0x19D0, 0x19DA}, {0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E},
{0x1A60, 0x1A7C}, {0x1A7F, 0x1A89}, {0x1A90, 0x1A99},
{0x1AA0, 0x1AAD}, {0x1AB0, 0x1ACE}, {0x1B00, 0x1B4C},
{0x1B50, 0x1B7E}, {0x1B80, 0x1BF3}, {0x1BFC, 0x1C37},
{0x1C3B, 0x1C49}, {0x1C4D, 0x1C88}, {0x1C90, 0x1CBA},
{0x1CBD, 0x1CC7}, {0x1CD0, 0x1CFA}, {0x1D00, 0x1F15},
{0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D},
{0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B},
{0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4},
@@ -237,7 +238,7 @@ var neutral = table{
{0x2036, 0x203A}, {0x203C, 0x203D}, {0x203F, 0x2064},
{0x2066, 0x2071}, {0x2075, 0x207E}, {0x2080, 0x2080},
{0x2085, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8},
{0x20AA, 0x20AB}, {0x20AD, 0x20BF}, {0x20D0, 0x20F0},
{0x20AA, 0x20AB}, {0x20AD, 0x20C0}, {0x20D0, 0x20F0},
{0x2100, 0x2102}, {0x2104, 0x2104}, {0x2106, 0x2108},
{0x210A, 0x2112}, {0x2114, 0x2115}, {0x2117, 0x2120},
{0x2123, 0x2125}, {0x2127, 0x212A}, {0x212C, 0x2152},
@@ -275,15 +276,15 @@ var neutral = table{
{0x2780, 0x2794}, {0x2798, 0x27AF}, {0x27B1, 0x27BE},
{0x27C0, 0x27E5}, {0x27EE, 0x2984}, {0x2987, 0x2B1A},
{0x2B1D, 0x2B4F}, {0x2B51, 0x2B54}, {0x2B5A, 0x2B73},
{0x2B76, 0x2B95}, {0x2B97, 0x2C2E}, {0x2C30, 0x2C5E},
{0x2C60, 0x2CF3}, {0x2CF9, 0x2D25}, {0x2D27, 0x2D27},
{0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D70},
{0x2D7F, 0x2D96}, {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE},
{0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6},
{0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE},
{0x2DE0, 0x2E52}, {0x303F, 0x303F}, {0x4DC0, 0x4DFF},
{0xA4D0, 0xA62B}, {0xA640, 0xA6F7}, {0xA700, 0xA7BF},
{0xA7C2, 0xA7CA}, {0xA7F5, 0xA82C}, {0xA830, 0xA839},
{0x2B76, 0x2B95}, {0x2B97, 0x2CF3}, {0x2CF9, 0x2D25},
{0x2D27, 0x2D27}, {0x2D2D, 0x2D2D}, {0x2D30, 0x2D67},
{0x2D6F, 0x2D70}, {0x2D7F, 0x2D96}, {0x2DA0, 0x2DA6},
{0x2DA8, 0x2DAE}, {0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE},
{0x2DC0, 0x2DC6}, {0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6},
{0x2DD8, 0x2DDE}, {0x2DE0, 0x2E5D}, {0x303F, 0x303F},
{0x4DC0, 0x4DFF}, {0xA4D0, 0xA62B}, {0xA640, 0xA6F7},
{0xA700, 0xA7CA}, {0xA7D0, 0xA7D1}, {0xA7D3, 0xA7D3},
{0xA7D5, 0xA7D9}, {0xA7F2, 0xA82C}, {0xA830, 0xA839},
{0xA840, 0xA877}, {0xA880, 0xA8C5}, {0xA8CE, 0xA8D9},
{0xA8E0, 0xA953}, {0xA95F, 0xA95F}, {0xA980, 0xA9CD},
{0xA9CF, 0xA9D9}, {0xA9DE, 0xA9FE}, {0xAA00, 0xAA36},
@@ -294,8 +295,8 @@ var neutral = table{
{0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDFFF},
{0xFB00, 0xFB06}, {0xFB13, 0xFB17}, {0xFB1D, 0xFB36},
{0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41},
{0xFB43, 0xFB44}, {0xFB46, 0xFBC1}, {0xFBD3, 0xFD3F},
{0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, {0xFDF0, 0xFDFD},
{0xFB43, 0xFB44}, {0xFB46, 0xFBC2}, {0xFBD3, 0xFD8F},
{0xFD92, 0xFDC7}, {0xFDCF, 0xFDCF}, {0xFDF0, 0xFDFF},
{0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC},
{0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFC}, {0x10000, 0x1000B},
{0x1000D, 0x10026}, {0x10028, 0x1003A}, {0x1003C, 0x1003D},
@@ -307,44 +308,48 @@ var neutral = table{
{0x10380, 0x1039D}, {0x1039F, 0x103C3}, {0x103C8, 0x103D5},
{0x10400, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3},
{0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563},
{0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755},
{0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808},
{0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C},
{0x1083F, 0x10855}, {0x10857, 0x1089E}, {0x108A7, 0x108AF},
{0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x1091B},
{0x1091F, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x109B7},
{0x109BC, 0x109CF}, {0x109D2, 0x10A03}, {0x10A05, 0x10A06},
{0x10A0C, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A35},
{0x10A38, 0x10A3A}, {0x10A3F, 0x10A48}, {0x10A50, 0x10A58},
{0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6}, {0x10AEB, 0x10AF6},
{0x10B00, 0x10B35}, {0x10B39, 0x10B55}, {0x10B58, 0x10B72},
{0x10B78, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF},
{0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2},
{0x10CFA, 0x10D27}, {0x10D30, 0x10D39}, {0x10E60, 0x10E7E},
{0x10E80, 0x10EA9}, {0x10EAB, 0x10EAD}, {0x10EB0, 0x10EB1},
{0x10F00, 0x10F27}, {0x10F30, 0x10F59}, {0x10FB0, 0x10FCB},
{0x10FE0, 0x10FF6}, {0x11000, 0x1104D}, {0x11052, 0x1106F},
{0x1107F, 0x110C1}, {0x110CD, 0x110CD}, {0x110D0, 0x110E8},
{0x110F0, 0x110F9}, {0x11100, 0x11134}, {0x11136, 0x11147},
{0x11150, 0x11176}, {0x11180, 0x111DF}, {0x111E1, 0x111F4},
{0x11200, 0x11211}, {0x11213, 0x1123E}, {0x11280, 0x11286},
{0x11288, 0x11288}, {0x1128A, 0x1128D}, {0x1128F, 0x1129D},
{0x1129F, 0x112A9}, {0x112B0, 0x112EA}, {0x112F0, 0x112F9},
{0x11300, 0x11303}, {0x11305, 0x1130C}, {0x1130F, 0x11310},
{0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333},
{0x11335, 0x11339}, {0x1133B, 0x11344}, {0x11347, 0x11348},
{0x1134B, 0x1134D}, {0x11350, 0x11350}, {0x11357, 0x11357},
{0x1135D, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374},
{0x11400, 0x1145B}, {0x1145D, 0x11461}, {0x11480, 0x114C7},
{0x114D0, 0x114D9}, {0x11580, 0x115B5}, {0x115B8, 0x115DD},
{0x11600, 0x11644}, {0x11650, 0x11659}, {0x11660, 0x1166C},
{0x11680, 0x116B8}, {0x116C0, 0x116C9}, {0x11700, 0x1171A},
{0x1171D, 0x1172B}, {0x11730, 0x1173F}, {0x11800, 0x1183B},
{0x118A0, 0x118F2}, {0x118FF, 0x11906}, {0x11909, 0x11909},
{0x1190C, 0x11913}, {0x11915, 0x11916}, {0x11918, 0x11935},
{0x11937, 0x11938}, {0x1193B, 0x11946}, {0x11950, 0x11959},
{0x119A0, 0x119A7}, {0x119AA, 0x119D7}, {0x119DA, 0x119E4},
{0x11A00, 0x11A47}, {0x11A50, 0x11AA2}, {0x11AC0, 0x11AF8},
{0x1056F, 0x1057A}, {0x1057C, 0x1058A}, {0x1058C, 0x10592},
{0x10594, 0x10595}, {0x10597, 0x105A1}, {0x105A3, 0x105B1},
{0x105B3, 0x105B9}, {0x105BB, 0x105BC}, {0x10600, 0x10736},
{0x10740, 0x10755}, {0x10760, 0x10767}, {0x10780, 0x10785},
{0x10787, 0x107B0}, {0x107B2, 0x107BA}, {0x10800, 0x10805},
{0x10808, 0x10808}, {0x1080A, 0x10835}, {0x10837, 0x10838},
{0x1083C, 0x1083C}, {0x1083F, 0x10855}, {0x10857, 0x1089E},
{0x108A7, 0x108AF}, {0x108E0, 0x108F2}, {0x108F4, 0x108F5},
{0x108FB, 0x1091B}, {0x1091F, 0x10939}, {0x1093F, 0x1093F},
{0x10980, 0x109B7}, {0x109BC, 0x109CF}, {0x109D2, 0x10A03},
{0x10A05, 0x10A06}, {0x10A0C, 0x10A13}, {0x10A15, 0x10A17},
{0x10A19, 0x10A35}, {0x10A38, 0x10A3A}, {0x10A3F, 0x10A48},
{0x10A50, 0x10A58}, {0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6},
{0x10AEB, 0x10AF6}, {0x10B00, 0x10B35}, {0x10B39, 0x10B55},
{0x10B58, 0x10B72}, {0x10B78, 0x10B91}, {0x10B99, 0x10B9C},
{0x10BA9, 0x10BAF}, {0x10C00, 0x10C48}, {0x10C80, 0x10CB2},
{0x10CC0, 0x10CF2}, {0x10CFA, 0x10D27}, {0x10D30, 0x10D39},
{0x10E60, 0x10E7E}, {0x10E80, 0x10EA9}, {0x10EAB, 0x10EAD},
{0x10EB0, 0x10EB1}, {0x10EFD, 0x10F27}, {0x10F30, 0x10F59},
{0x10F70, 0x10F89}, {0x10FB0, 0x10FCB}, {0x10FE0, 0x10FF6},
{0x11000, 0x1104D}, {0x11052, 0x11075}, {0x1107F, 0x110C2},
{0x110CD, 0x110CD}, {0x110D0, 0x110E8}, {0x110F0, 0x110F9},
{0x11100, 0x11134}, {0x11136, 0x11147}, {0x11150, 0x11176},
{0x11180, 0x111DF}, {0x111E1, 0x111F4}, {0x11200, 0x11211},
{0x11213, 0x11241}, {0x11280, 0x11286}, {0x11288, 0x11288},
{0x1128A, 0x1128D}, {0x1128F, 0x1129D}, {0x1129F, 0x112A9},
{0x112B0, 0x112EA}, {0x112F0, 0x112F9}, {0x11300, 0x11303},
{0x11305, 0x1130C}, {0x1130F, 0x11310}, {0x11313, 0x11328},
{0x1132A, 0x11330}, {0x11332, 0x11333}, {0x11335, 0x11339},
{0x1133B, 0x11344}, {0x11347, 0x11348}, {0x1134B, 0x1134D},
{0x11350, 0x11350}, {0x11357, 0x11357}, {0x1135D, 0x11363},
{0x11366, 0x1136C}, {0x11370, 0x11374}, {0x11400, 0x1145B},
{0x1145D, 0x11461}, {0x11480, 0x114C7}, {0x114D0, 0x114D9},
{0x11580, 0x115B5}, {0x115B8, 0x115DD}, {0x11600, 0x11644},
{0x11650, 0x11659}, {0x11660, 0x1166C}, {0x11680, 0x116B9},
{0x116C0, 0x116C9}, {0x11700, 0x1171A}, {0x1171D, 0x1172B},
{0x11730, 0x11746}, {0x11800, 0x1183B}, {0x118A0, 0x118F2},
{0x118FF, 0x11906}, {0x11909, 0x11909}, {0x1190C, 0x11913},
{0x11915, 0x11916}, {0x11918, 0x11935}, {0x11937, 0x11938},
{0x1193B, 0x11946}, {0x11950, 0x11959}, {0x119A0, 0x119A7},
{0x119AA, 0x119D7}, {0x119DA, 0x119E4}, {0x11A00, 0x11A47},
{0x11A50, 0x11AA2}, {0x11AB0, 0x11AF8}, {0x11B00, 0x11B09},
{0x11C00, 0x11C08}, {0x11C0A, 0x11C36}, {0x11C38, 0x11C45},
{0x11C50, 0x11C6C}, {0x11C70, 0x11C8F}, {0x11C92, 0x11CA7},
{0x11CA9, 0x11CB6}, {0x11D00, 0x11D06}, {0x11D08, 0x11D09},
@@ -352,30 +357,36 @@ var neutral = table{
{0x11D3F, 0x11D47}, {0x11D50, 0x11D59}, {0x11D60, 0x11D65},
{0x11D67, 0x11D68}, {0x11D6A, 0x11D8E}, {0x11D90, 0x11D91},
{0x11D93, 0x11D98}, {0x11DA0, 0x11DA9}, {0x11EE0, 0x11EF8},
{0x11F00, 0x11F10}, {0x11F12, 0x11F3A}, {0x11F3E, 0x11F59},
{0x11FB0, 0x11FB0}, {0x11FC0, 0x11FF1}, {0x11FFF, 0x12399},
{0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543},
{0x13000, 0x1342E}, {0x13430, 0x13438}, {0x14400, 0x14646},
{0x12F90, 0x12FF2}, {0x13000, 0x13455}, {0x14400, 0x14646},
{0x16800, 0x16A38}, {0x16A40, 0x16A5E}, {0x16A60, 0x16A69},
{0x16A6E, 0x16A6F}, {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF5},
{0x16B00, 0x16B45}, {0x16B50, 0x16B59}, {0x16B5B, 0x16B61},
{0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, {0x16E40, 0x16E9A},
{0x16F00, 0x16F4A}, {0x16F4F, 0x16F87}, {0x16F8F, 0x16F9F},
{0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, {0x1BC80, 0x1BC88},
{0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3}, {0x1D000, 0x1D0F5},
{0x1D100, 0x1D126}, {0x1D129, 0x1D1E8}, {0x1D200, 0x1D245},
{0x1D2E0, 0x1D2F3}, {0x1D300, 0x1D356}, {0x1D360, 0x1D378},
{0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F},
{0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC},
{0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3},
{0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514},
{0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E},
{0x1D540, 0x1D544}, {0x1D546, 0x1D546}, {0x1D54A, 0x1D550},
{0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B},
{0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006},
{0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024},
{0x1E026, 0x1E02A}, {0x1E100, 0x1E12C}, {0x1E130, 0x1E13D},
{0x1E140, 0x1E149}, {0x1E14E, 0x1E14F}, {0x1E2C0, 0x1E2F9},
{0x1E2FF, 0x1E2FF}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6},
{0x16A6E, 0x16ABE}, {0x16AC0, 0x16AC9}, {0x16AD0, 0x16AED},
{0x16AF0, 0x16AF5}, {0x16B00, 0x16B45}, {0x16B50, 0x16B59},
{0x16B5B, 0x16B61}, {0x16B63, 0x16B77}, {0x16B7D, 0x16B8F},
{0x16E40, 0x16E9A}, {0x16F00, 0x16F4A}, {0x16F4F, 0x16F87},
{0x16F8F, 0x16F9F}, {0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C},
{0x1BC80, 0x1BC88}, {0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3},
{0x1CF00, 0x1CF2D}, {0x1CF30, 0x1CF46}, {0x1CF50, 0x1CFC3},
{0x1D000, 0x1D0F5}, {0x1D100, 0x1D126}, {0x1D129, 0x1D1EA},
{0x1D200, 0x1D245}, {0x1D2C0, 0x1D2D3}, {0x1D2E0, 0x1D2F3},
{0x1D300, 0x1D356}, {0x1D360, 0x1D378}, {0x1D400, 0x1D454},
{0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F}, {0x1D4A2, 0x1D4A2},
{0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC}, {0x1D4AE, 0x1D4B9},
{0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3}, {0x1D4C5, 0x1D505},
{0x1D507, 0x1D50A}, {0x1D50D, 0x1D514}, {0x1D516, 0x1D51C},
{0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E}, {0x1D540, 0x1D544},
{0x1D546, 0x1D546}, {0x1D54A, 0x1D550}, {0x1D552, 0x1D6A5},
{0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B}, {0x1DA9B, 0x1DA9F},
{0x1DAA1, 0x1DAAF}, {0x1DF00, 0x1DF1E}, {0x1DF25, 0x1DF2A},
{0x1E000, 0x1E006}, {0x1E008, 0x1E018}, {0x1E01B, 0x1E021},
{0x1E023, 0x1E024}, {0x1E026, 0x1E02A}, {0x1E030, 0x1E06D},
{0x1E08F, 0x1E08F}, {0x1E100, 0x1E12C}, {0x1E130, 0x1E13D},
{0x1E140, 0x1E149}, {0x1E14E, 0x1E14F}, {0x1E290, 0x1E2AE},
{0x1E2C0, 0x1E2F9}, {0x1E2FF, 0x1E2FF}, {0x1E4D0, 0x1E4F9},
{0x1E7E0, 0x1E7E6}, {0x1E7E8, 0x1E7EB}, {0x1E7ED, 0x1E7EE},
{0x1E7F0, 0x1E7FE}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6},
{0x1E900, 0x1E94B}, {0x1E950, 0x1E959}, {0x1E95E, 0x1E95F},
{0x1EC71, 0x1ECB4}, {0x1ED01, 0x1ED3D}, {0x1EE00, 0x1EE03},
{0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, {0x1EE24, 0x1EE24},
@@ -400,8 +411,8 @@ var neutral = table{
{0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594},
{0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F},
{0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6D3, 0x1F6D4},
{0x1F6E0, 0x1F6EA}, {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773},
{0x1F780, 0x1F7D8}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847},
{0x1F6E0, 0x1F6EA}, {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F776},
{0x1F77B, 0x1F7D9}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847},
{0x1F850, 0x1F859}, {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD},
{0x1F8B0, 0x1F8B1}, {0x1F900, 0x1F90B}, {0x1F93B, 0x1F93B},
{0x1F946, 0x1F946}, {0x1FA00, 0x1FA53}, {0x1FA60, 0x1FA6D},
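The regenerated tables above track newer Unicode data (for instance, the combining table now extends 0x1AB0–0x1ACE and the double-width table gains 0x1FAE0–0x1FAE8); go-runewidth consults them to compute terminal cell widths. A small usage sketch:

package main

import (
	"fmt"

	"github.com/mattn/go-runewidth"
)

func main() {
	fmt.Println(runewidth.RuneWidth('a'))          // 1: ordinary ASCII
	fmt.Println(runewidth.RuneWidth('\u0301'))     // 0: combining acute accent, from the combining table
	fmt.Println(runewidth.RuneWidth('世'))          // 2: CJK, from the doublewidth table
	fmt.Println(runewidth.StringWidth("e\u0301世")) // 3 = 1 + 0 + 2
}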
15
vendor/github.com/vbauerster/mpb/v8/bar.go
generated
vendored
@@ -173,13 +173,12 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) {
}

// EnableTriggerComplete enables triggering complete event. It's effective
// only for bars which were constructed with `total <= 0` and after total
// has been set with `(*Bar).SetTotal(int64, false)`. If `current >= total`
// only for bars which were constructed with `total <= 0`. If `current >= total`
// at the moment of call, complete event is triggered right away.
func (b *Bar) EnableTriggerComplete() {
select {
case b.operateState <- func(s *bState) {
if s.triggerComplete || s.total <= 0 {
if s.triggerComplete {
return
}
if s.current >= s.total {
@@ -196,10 +195,10 @@ func (b *Bar) EnableTriggerComplete() {

// SetTotal sets total to an arbitrary value. It's effective only for a bar
// which was constructed with `total <= 0`. Setting total to negative value
// is equivalent to `(*Bar).SetTotal((*Bar).Current(), bool)` but faster. If
// triggerCompletion is true, total value is set to current and complete
// event is triggered right away.
func (b *Bar) SetTotal(total int64, triggerCompletion bool) {
// is equivalent to `(*Bar).SetTotal((*Bar).Current(), bool)` but faster.
// If `complete` is true, complete event is triggered right away.
// Calling `(*Bar).EnableTriggerComplete` makes this method a no-op.
func (b *Bar) SetTotal(total int64, complete bool) {
select {
case b.operateState <- func(s *bState) {
if s.triggerComplete {
@@ -210,7 +209,7 @@ func (b *Bar) SetTotal(total int64, triggerCompletion bool) {
} else {
s.total = total
}
if triggerCompletion {
if complete {
s.current = s.total
s.completed = true
b.triggerCompletion(s)
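With v8.7.5, EnableTriggerComplete no longer bails out while the total is still unset, and SetTotal's second parameter is renamed to complete. A hedged usage sketch for a bar whose total is only learned mid-run (the decorator choice is illustrative):

package main

import (
	"github.com/vbauerster/mpb/v8"
	"github.com/vbauerster/mpb/v8/decor"
)

func main() {
	p := mpb.New()
	// total <= 0 puts the bar into "unknown total" mode.
	bar := p.AddBar(-1, mpb.PrependDecorators(decor.CountersNoUnit("%d / %d")))

	for i := 0; i < 100; i++ {
		bar.IncrBy(1) // progress arrives before the total is known
	}

	// Once the total is known, fix it; complete=false keeps the bar running.
	bar.SetTotal(100, false)
	// Arm the complete event; since current >= total already, it fires right away.
	bar.EnableTriggerComplete()

	p.Wait()
}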
4
vendor/golang.org/x/crypto/LICENSE
generated
vendored
@@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
5777
vendor/golang.org/x/crypto/sha3/keccakf_amd64.s
generated
vendored
File diff suppressed because it is too large
4
vendor/golang.org/x/oauth2/LICENSE
generated
vendored
@@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
4
vendor/golang.org/x/sync/LICENSE
generated
vendored
@@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
4
vendor/golang.org/x/text/LICENSE
generated
vendored
@@ -1,4 +1,4 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright 2009 The Go Authors.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@ notice, this list of conditions and the following disclaimer.
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
14
vendor/modules.txt
vendored
@@ -73,7 +73,7 @@ github.com/containers/common/pkg/password
github.com/containers/common/pkg/report
github.com/containers/common/pkg/report/camelcase
github.com/containers/common/pkg/retry
# github.com/containers/image/v5 v5.32.0
# github.com/containers/image/v5 v5.32.1
## explicit; go 1.21.0
github.com/containers/image/v5/copy
github.com/containers/image/v5/directory
@@ -404,7 +404,7 @@ github.com/letsencrypt/boulder/strictyaml
github.com/mailru/easyjson/buffer
github.com/mailru/easyjson/jlexer
github.com/mailru/easyjson/jwriter
# github.com/mattn/go-runewidth v0.0.15
# github.com/mattn/go-runewidth v0.0.16
## explicit; go 1.9
github.com/mattn/go-runewidth
# github.com/mattn/go-sqlite3 v1.14.22
@@ -552,7 +552,7 @@ github.com/ulikunitz/xz/lzma
github.com/vbatts/tar-split/archive/tar
github.com/vbatts/tar-split/tar/asm
github.com/vbatts/tar-split/tar/storage
# github.com/vbauerster/mpb/v8 v8.7.4
# github.com/vbauerster/mpb/v8 v8.7.5
## explicit; go 1.17
github.com/vbauerster/mpb/v8
github.com/vbauerster/mpb/v8/cwriter
@@ -613,7 +613,7 @@ go.opentelemetry.io/otel/metric/embedded
## explicit; go 1.20
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
# golang.org/x/crypto v0.25.0
# golang.org/x/crypto v0.26.0
## explicit; go 1.20
golang.org/x/crypto/cast5
golang.org/x/crypto/internal/alias
@@ -644,11 +644,11 @@ golang.org/x/net/http2/hpack
golang.org/x/net/idna
golang.org/x/net/internal/timeseries
golang.org/x/net/trace
# golang.org/x/oauth2 v0.21.0
# golang.org/x/oauth2 v0.22.0
## explicit; go 1.18
golang.org/x/oauth2
golang.org/x/oauth2/internal
# golang.org/x/sync v0.7.0
# golang.org/x/sync v0.8.0
## explicit; go 1.18
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
@@ -662,7 +662,7 @@ golang.org/x/sys/windows/registry
# golang.org/x/term v0.23.0
## explicit; go 1.18
golang.org/x/term
# golang.org/x/text v0.16.0
# golang.org/x/text v0.17.0
## explicit; go 1.18
golang.org/x/text/secure/bidirule
golang.org/x/text/transform