Update c/image and c/common to latest

... to include https://github.com/containers/image/pull/2173
and https://github.com/containers/common/pull/1731 .

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
Miloslav Trmač 2023-11-07 19:42:07 +01:00
parent 313342bdf8
commit 518181e595
165 changed files with 11655 additions and 5923 deletions

go.mod (55 changed lines)

@@ -3,8 +3,8 @@ module github.com/containers/skopeo
go 1.19
require (
github.com/containers/common v0.56.1-0.20231101110448-3e5caa093e79
github.com/containers/image/v5 v5.28.0
github.com/containers/common v0.56.1-0.20231116145922-4857e16ba481
github.com/containers/image/v5 v5.28.1-0.20231115174231-199d2562a0d1
github.com/containers/ocicrypt v1.1.9
github.com/containers/storage v1.51.0
github.com/docker/distribution v2.8.3+incompatible
@@ -16,7 +16,7 @@ require (
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.8.4
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
golang.org/x/exp v0.0.0-20230905200255-921286631fa9
golang.org/x/exp v0.0.0-20231006140011-7918f672742d
golang.org/x/term v0.14.0
gopkg.in/yaml.v3 v3.0.1
)
@@ -30,11 +30,11 @@ require (
github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/containerd/cgroups/v3 v3.0.2 // indirect
github.com/containerd/containerd v1.7.8 // indirect
github.com/containerd/containerd v1.7.9 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
github.com/coreos/go-oidc/v3 v3.6.0 // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd // indirect
github.com/coreos/go-oidc/v3 v3.7.0 // indirect
github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect
github.com/cyphar/filepath-securejoin v0.2.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/distribution/reference v0.5.0 // indirect
@@ -43,8 +43,8 @@ require (
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
github.com/go-jose/go-jose/v3 v3.0.0 // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-jose/go-jose/v3 v3.0.1 // indirect
github.com/go-logr/logr v1.3.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.21.4 // indirect
github.com/go-openapi/errors v0.20.4 // indirect
@@ -66,17 +66,17 @@ require (
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
github.com/hashicorp/go-retryablehttp v0.7.5 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.2 // indirect
github.com/klauspost/compress v1.17.3 // indirect
github.com/klauspost/pgzip v1.2.6 // indirect
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
github.com/mattn/go-sqlite3 v1.14.17 // indirect
github.com/mattn/go-sqlite3 v1.14.18 // indirect
github.com/miekg/pkcs11 v1.1.1 // indirect
github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
@@ -96,37 +96,36 @@ require (
github.com/russross/blackfriday v2.0.0+incompatible // indirect
github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
github.com/segmentio/ksuid v1.0.4 // indirect
github.com/sigstore/fulcio v1.4.0 // indirect
github.com/sigstore/fulcio v1.4.3 // indirect
github.com/sigstore/rekor v1.2.2 // indirect
github.com/sigstore/sigstore v1.7.3 // indirect
github.com/sigstore/sigstore v1.7.5 // indirect
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
github.com/sylabs/sif/v2 v2.13.0 // indirect
github.com/sylabs/sif/v2 v2.15.0 // indirect
github.com/tchap/go-patricia/v2 v2.3.1 // indirect
github.com/theupdateframework/go-tuf v0.5.2 // indirect
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
github.com/ulikunitz/xz v0.5.11 // indirect
github.com/vbatts/tar-split v0.11.5 // indirect
github.com/vbauerster/mpb/v8 v8.6.1 // indirect
github.com/vbauerster/mpb/v8 v8.6.2 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
go.mongodb.org/mongo-driver v1.11.3 // indirect
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/otel v1.16.0 // indirect
go.opentelemetry.io/otel/metric v1.16.0 // indirect
go.opentelemetry.io/otel/trace v1.16.0 // indirect
golang.org/x/crypto v0.14.0 // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/oauth2 v0.12.0 // indirect
golang.org/x/sync v0.4.0 // indirect
go.opentelemetry.io/otel v1.19.0 // indirect
go.opentelemetry.io/otel/metric v1.19.0 // indirect
go.opentelemetry.io/otel/trace v1.19.0 // indirect
golang.org/x/crypto v0.15.0 // indirect
golang.org/x/mod v0.13.0 // indirect
golang.org/x/net v0.18.0 // indirect
golang.org/x/oauth2 v0.14.0 // indirect
golang.org/x/sync v0.5.0 // indirect
golang.org/x/sys v0.14.0 // indirect
golang.org/x/text v0.13.0 // indirect
golang.org/x/tools v0.13.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/tools v0.14.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect
google.golang.org/grpc v1.58.3 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect

go.sum (143 changed lines)

@@ -26,26 +26,26 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
github.com/containerd/containerd v1.7.8 h1:RkwgOW3AVUT3H/dyT0W03Dc8AzlpMG65lX48KftOFSM=
github.com/containerd/containerd v1.7.8/go.mod h1:L/Hn9qylJtUFT7cPeM0Sr3fATj+WjHwRQ0lyrYk3OPY=
github.com/containerd/containerd v1.7.9 h1:KOhK01szQbM80YfW1H6RZKh85PHGqY/9OcEZ35Je8sc=
github.com/containerd/containerd v1.7.9/go.mod h1:0/W44LWEYfSHoxBtsHIiNU/duEkgpMokemafHVCpq9Y=
github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
github.com/containers/common v0.56.1-0.20231101110448-3e5caa093e79 h1:ECmubgcAF/lT8BHT29ZGu0hBqru8qpNY85HHFATa1wc=
github.com/containers/common v0.56.1-0.20231101110448-3e5caa093e79/go.mod h1:WOhqTBT+2qSHz86CYZ+4LYAkcM9kSQJ7ZobNu0AGJPU=
github.com/containers/image/v5 v5.28.0 h1:H4cWbdI88UA/mDb6SxMo3IxpmS1BSs/Kifvhwt9g048=
github.com/containers/image/v5 v5.28.0/go.mod h1:9aPnNkwHNHgGl9VlQxXEshvmOJRbdRAc1rNDD6sP2eU=
github.com/containers/common v0.56.1-0.20231116145922-4857e16ba481 h1:CMKyklpGngAH9Ao6hTmxglYFfAgxw/Ayu/ovB81lf24=
github.com/containers/common v0.56.1-0.20231116145922-4857e16ba481/go.mod h1:OWGhho071scyQI0NdI7uQPCLxNyNEv2ExXvxJUWmZoM=
github.com/containers/image/v5 v5.28.1-0.20231115174231-199d2562a0d1 h1:r0pQSYw/b6NZqduvo72hb+4Q50VQ/pvazBckzyE1YuU=
github.com/containers/image/v5 v5.28.1-0.20231115174231-199d2562a0d1/go.mod h1:0/j6iwLprBC+yraLkrZRkc3egvlZfM7hjNcGhzZfrMs=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOjIdEM=
github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys=
github.com/containers/storage v1.51.0 h1:AowbcpiWXzAjHosKz7MKvPEqpyX+ryZA/ZurytRrFNA=
github.com/containers/storage v1.51.0/go.mod h1:ybl8a3j1PPtpyaEi/5A6TOFs+5TrEyObeKJzVtkUlfc=
github.com/coreos/go-oidc/v3 v3.6.0 h1:AKVxfYw1Gmkn/w96z0DbT/B/xFnzTd3MkZvWLjF4n/o=
github.com/coreos/go-oidc/v3 v3.6.0/go.mod h1:ZpHUsHBucTUj6WOkrP4E20UPynbLZzhTQ1XKCXkxyPc=
github.com/coreos/go-oidc/v3 v3.7.0 h1:FTdj0uexT4diYIPlF4yoFVI5MRO1r5+SEcIpEw9vC0o=
github.com/coreos/go-oidc/v3 v3.7.0/go.mod h1:yQzSCqBnK3e6Fs5l+f5i0F8Kwf0zpH9bPEsbY00KanM=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd h1:0av0vtcjA8Hqv5gyWj79CLCFVwOOyBNWPjrfUWceMNg=
github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc=
github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -53,6 +53,7 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
@@ -61,6 +62,7 @@ github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E1
github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 h1:iFaUwBSo5Svw6L7HYpRu/0lE3e0BaElwnNO1qkNQxBY=
@@ -73,11 +75,11 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg=
github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo=
github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
@@ -118,7 +120,7 @@ github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogB
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
github.com/go-rod/rod v0.114.3 h1:gAUT2Bc2wy0tQL5KEet05HNDvmndaHAGCjQ01TB2efA=
github.com/go-rod/rod v0.114.4 h1:FpkNFukjCuZLwnoLs+S9aCL95o/EMec6M+41UmvQay8=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
@@ -154,7 +156,6 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
@@ -164,6 +165,7 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -198,8 +200,8 @@ github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxC
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZnpUv3/+BxzFA=
github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc=
github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
@@ -217,8 +219,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4=
github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -244,8 +246,8 @@ github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZ
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM=
github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI=
github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
@@ -268,8 +270,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4=
github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg=
github.com/onsi/ginkgo/v2 v2.13.1 h1:LNGfMbR2OVGBfXjvRZIZ2YCTQdGKtPLvuI1rMCCj3OU=
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
@@ -286,6 +288,7 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqwnHagNDPGOlts35QkhAZ8by3DR7nMih7M=
github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -295,18 +298,18 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0=
github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/russross/blackfriday v2.0.0+incompatible h1:cBXrhZNUf9C+La9/YpS+UHpUT8YD6Td9ZMSU9APFcsk=
github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -317,12 +320,12 @@ github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/sigstore/fulcio v1.4.0 h1:05+k8BFvwTQzfCkVxESWzCN4b70KIRliGYz0Upmdrs8=
github.com/sigstore/fulcio v1.4.0/go.mod h1:wcjlktbhoy6+ZTxO3yXpvqUxsLV+JEH4FF3a5Jz4VPI=
github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ=
github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og=
github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY=
github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg=
github.com/sigstore/sigstore v1.7.3 h1:HVVTfrMezJeLyl2xhJ8edzkrEGBa4KxjQZB4FlQ4JLU=
github.com/sigstore/sigstore v1.7.3/go.mod h1:cl0c7Dtg3MM3c13L8pqqrfrmBa0eM3POcdtBepjylmw=
github.com/sigstore/sigstore v1.7.5 h1:ij55dBhLwjICmLTBJZm7SqoQLdsu/oowDanACcJNs48=
github.com/sigstore/sigstore v1.7.5/go.mod h1:9OCmYWhzuq/G4e1cy9m297tuMRJ1LExyrXY3ZC3Zt/s=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -351,14 +354,12 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/sylabs/sif/v2 v2.13.0 h1:dK/PQ/ohLAA4hptbjNuU0qoqkJ9Kl07hiSHArMNSKsQ=
github.com/sylabs/sif/v2 v2.13.0/go.mod h1:qEFrmE29XNbW2uyBagTsw9dgM82MwsckNYUFPweF2ek=
github.com/sylabs/sif/v2 v2.15.0 h1:Nv0tzksFnoQiQ2eUwpAis9nVqEu4c3RcNSxX8P3Cecw=
github.com/sylabs/sif/v2 v2.15.0/go.mod h1:X1H7eaPz6BAxA84POMESXoXfTqgAnLQkujyF/CQFWTc=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/theupdateframework/go-tuf v0.5.2 h1:habfDzTmpbzBLIFGWa2ZpVhYvFBoK0C1onC3a4zuPRA=
github.com/theupdateframework/go-tuf v0.5.2/go.mod h1:SyMV5kg5n4uEclsyxXJZI2UxPFJNDc4Y+r7wv+MlvTA=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
@@ -368,8 +369,8 @@ github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
github.com/vbauerster/mpb/v8 v8.6.1 h1:XbBpIbJxJOO9yMcKPpI4oEFPW6tLAptefNQJNcGWri8=
github.com/vbauerster/mpb/v8 v8.6.1/go.mod h1:S0tuIjikxlLxCeNijNhwAuD/BB3UE/d2nygG8SOldk0=
github.com/vbauerster/mpb/v8 v8.6.2 h1:9EhnJGQRtvgDVCychJgR96EDCOqgg2NsMuk5JUcX4DA=
github.com/vbauerster/mpb/v8 v8.6.2/go.mod h1:oVJ7T+dib99kZ/VBjoBaC8aPXiSAihnzuKmotuihyFo=
github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
@@ -392,6 +393,7 @@ github.com/ysmood/gson v0.7.3 h1:QFkWbTH8MxyUTKPkVWAENJhxqdBa4lYTQWqZCiLG6kE=
github.com/ysmood/leakless v0.8.0 h1:BzLrVoiwxikpgEQR0Lk8NyBN5Cit2b1z+u0mgL4ZJak=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
@@ -401,13 +403,13 @@ go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdH
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s=
go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4=
go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo=
go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4=
go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE=
go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs=
go.opentelemetry.io/otel/trace v1.16.0/go.mod h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0=
go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -416,36 +418,39 @@ golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA=
golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g=
golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY=
golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg=
golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4=
golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4=
golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0=
golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -454,8 +459,9 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -471,19 +477,21 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8=
golang.org/x/term v0.14.0/go.mod h1:TySc+nGkYR6qt8km8wUhuFRTVSMIX3XPR58y2lC8vww=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -497,21 +505,22 @@ golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc=
golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=


@@ -16,6 +16,7 @@ import (
"github.com/containers/image/v5/pkg/docker/config"
"github.com/containers/image/v5/pkg/sysregistriesv2"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/homedir"
"github.com/sirupsen/logrus"
)
@@ -39,33 +40,46 @@ func (e ErrNewCredentialsInvalid) Unwrap() error {
// GetDefaultAuthFile returns the value of the REGISTRY_AUTH_FILE environment variable
// as the default --authfile path used in multiple --authfile flag definitions;
// it falls back to DOCKER_CONFIG if REGISTRY_AUTH_FILE is not set.
//
// WARNINGS:
// - In almost all invocations, expect this function to return ""; so it cannot be used
// for directly accessing the file.
// - Use this only for commands that _read_ credentials, not write them.
// The path may refer to github.com/containers auth.json, or to Docker config.json,
// and the distinction is lost; writing auth.json data to config.json may not be consumable by Docker,
// or it may overwrite and discard unrelated Docker configuration set by the user.
func GetDefaultAuthFile() string {
// Keep this in sync with the default logic in systemContextWithOptions!
if authfile := os.Getenv("REGISTRY_AUTH_FILE"); authfile != "" {
return authfile
}
// This pre-existing behavior is not conceptually consistent:
// If users have a ~/.docker/config.json in the default path, and no environment variable
// set, we read auth.json first, falling back to config.json;
// but if DOCKER_CONFIG is set, we read only config.json in that path, and we don't read auth.json at all.
if authEnv := os.Getenv("DOCKER_CONFIG"); authEnv != "" {
return filepath.Join(authEnv, "config.json")
}
return ""
}
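
To illustrate the precedence this function documents (REGISTRY_AUTH_FILE first, then DOCKER_CONFIG, otherwise ""), here is a minimal standalone sketch; the program is hypothetical, and auth stands for the github.com/containers/common/pkg/auth package patched here:

package main

import (
	"fmt"
	"os"

	"github.com/containers/common/pkg/auth"
)

func main() {
	os.Setenv("DOCKER_CONFIG", "/tmp/dockercfg")
	fmt.Println(auth.GetDefaultAuthFile()) // /tmp/dockercfg/config.json

	os.Setenv("REGISTRY_AUTH_FILE", "/tmp/auth.json")
	fmt.Println(auth.GetDefaultAuthFile()) // /tmp/auth.json: REGISTRY_AUTH_FILE wins

	os.Unsetenv("REGISTRY_AUTH_FILE")
	os.Unsetenv("DOCKER_CONFIG")
	fmt.Println(auth.GetDefaultAuthFile()) // "" - the common case the WARNINGS describe
}
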
// CheckAuthFile validates filepath given by --authfile
// used by command has --authfile flag
func CheckAuthFile(authfile string) error {
if authfile == "" {
// CheckAuthFile validates a path option, failing if the option is set but the referenced file is not accessible.
func CheckAuthFile(pathOption string) error {
if pathOption == "" {
return nil
}
if _, err := os.Stat(authfile); err != nil {
return fmt.Errorf("checking authfile: %w", err)
if _, err := os.Stat(pathOption); err != nil {
return fmt.Errorf("credential file is not accessible: %w", err)
}
return nil
}
// systemContextWithOptions returns a version of sys
// updated with authFile and certDir values (if they are not "").
// updated with authFile, dockerCompatAuthFile and certDir values (if they are not "").
// NOTE: this is a shallow copy that can be used and updated, but may share
// data with the original parameter.
func systemContextWithOptions(sys *types.SystemContext, authFile, certDir string) *types.SystemContext {
func systemContextWithOptions(sys *types.SystemContext, authFile, dockerCompatAuthFile, certDir string) (*types.SystemContext, error) {
if sys != nil {
sysCopy := *sys
sys = &sysCopy
@@ -73,24 +87,50 @@ func systemContextWithOptions(sys *types.SystemContext, authFile, certDir string
sys = &types.SystemContext{}
}
if authFile != "" {
defaultDockerConfigPath := filepath.Join(homedir.Get(), ".docker", "config.json")
switch {
case authFile != "" && dockerCompatAuthFile != "":
return nil, errors.New("options for paths to the credential file and to the Docker-compatible credential file cannot be set simultaneously")
case authFile != "":
if authFile == defaultDockerConfigPath {
logrus.Warn("saving credentials to ~/.docker/config.json, but not using Docker-compatible file format")
}
sys.AuthFilePath = authFile
case dockerCompatAuthFile != "":
sys.DockerCompatAuthFilePath = dockerCompatAuthFile
default:
// Keep this in sync with GetDefaultAuthFile()!
//
// Note that c/image does not natively implement the REGISTRY_AUTH_FILE
// variable, so not all callers look for credentials in this location.
if authFileVar := os.Getenv("REGISTRY_AUTH_FILE"); authFileVar != "" {
if authFileVar == defaultDockerConfigPath {
logrus.Warn("$REGISTRY_AUTH_FILE points to ~/.docker/config.json, but the file format is not fully compatible; use the Docker-compatible file path option instead")
}
sys.AuthFilePath = authFileVar
} else if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
// This preserves pre-existing _inconsistent_ behavior:
// If the Docker configuration exists in the default ~/.docker/config.json location,
// we DO NOT write to it; instead, we update auth.json in the default path.
// Only if the user explicitly sets DOCKER_CONFIG, we write to that config.json.
sys.DockerCompatAuthFilePath = filepath.Join(dockerConfig, "config.json")
}
}
if certDir != "" {
sys.DockerCertPath = certDir
}
return sys
return sys, nil
}
// Login implements a “log in” command with the provided opts and args
// reading the password from opts.Stdin or the options in opts.
func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginOptions, args []string) error {
systemContext = systemContextWithOptions(systemContext, opts.AuthFile, opts.CertDir)
systemContext, err := systemContextWithOptions(systemContext, opts.AuthFile, opts.DockerCompatAuthFile, opts.CertDir)
if err != nil {
return err
}
var (
key, registry string
err error
)
var key, registry string
switch len(args) {
case 0:
if !opts.AcceptUnspecifiedRegistry {
@@ -284,7 +324,13 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
if err := CheckAuthFile(opts.AuthFile); err != nil {
return err
}
systemContext = systemContextWithOptions(systemContext, opts.AuthFile, "")
if err := CheckAuthFile(opts.DockerCompatAuthFile); err != nil {
return err
}
systemContext, err := systemContextWithOptions(systemContext, opts.AuthFile, opts.DockerCompatAuthFile, "")
if err != nil {
return err
}
if opts.All {
if len(args) != 0 {
@@ -297,10 +343,7 @@ func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []stri
return nil
}
var (
key, registry string
err error
)
var key, registry string
switch len(args) {
case 0:
if !opts.AcceptUnspecifiedRegistry {


@@ -14,14 +14,15 @@ type LoginOptions struct {
// CLI flags managed by the FlagSet returned by GetLoginFlags
// Callers that use GetLoginFlags should not need to touch these values at all; callers that use
// other CLI frameworks should set them based on user input.
AuthFile string
CertDir string
Password string
Username string
StdinPassword bool
GetLoginSet bool
Verbose bool // set to true for verbose output
AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries
AuthFile string
DockerCompatAuthFile string
CertDir string
Password string
Username string
StdinPassword bool
GetLoginSet bool
Verbose bool // set to true for verbose output
AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries
// Options caller can set
Stdin io.Reader // set to os.Stdin
Stdout io.Writer // set to os.Stdout
@@ -34,9 +35,10 @@ type LogoutOptions struct {
// CLI flags managed by the FlagSet returned by GetLogoutFlags
// Callers that use GetLogoutFlags should not need to touch these values at all; callers that use
// other CLI frameworks should set them based on user input.
AuthFile string
All bool
AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries
AuthFile string
DockerCompatAuthFile string
All bool
AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries
// Options caller can set
Stdout io.Writer // set to os.Stdout
AcceptUnspecifiedRegistry bool // set to true if allows logout with unspecified registry
@@ -45,7 +47,8 @@ type LogoutOptions struct {
// GetLoginFlags defines and returns login flags for containers tools
func GetLoginFlags(flags *LoginOptions) *pflag.FlagSet {
fs := pflag.FlagSet{}
fs.StringVar(&flags.AuthFile, "authfile", GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
fs.StringVar(&flags.AuthFile, "authfile", "", "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
fs.StringVar(&flags.DockerCompatAuthFile, "compat-auth-file", "", "path of a Docker-compatible config file to update instead")
fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry")
fs.StringVarP(&flags.Password, "password", "p", "", "Password for registry")
fs.StringVarP(&flags.Username, "username", "u", "", "Username for registry")
@@ -59,6 +62,7 @@ func GetLoginFlags(flags *LoginOptions) *pflag.FlagSet {
func GetLoginFlagsCompletions() completion.FlagCompletions {
flagCompletion := completion.FlagCompletions{}
flagCompletion["authfile"] = completion.AutocompleteDefault
flagCompletion["compat-auth-file"] = completion.AutocompleteDefault
flagCompletion["cert-dir"] = completion.AutocompleteDefault
flagCompletion["password"] = completion.AutocompleteNone
flagCompletion["username"] = completion.AutocompleteNone
@@ -68,7 +72,8 @@ func GetLoginFlagsCompletions() completion.FlagCompletions {
// GetLogoutFlags defines and returns logout flags for containers tools
func GetLogoutFlags(flags *LogoutOptions) *pflag.FlagSet {
fs := pflag.FlagSet{}
fs.StringVar(&flags.AuthFile, "authfile", GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
fs.StringVar(&flags.AuthFile, "authfile", "", "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
fs.StringVar(&flags.DockerCompatAuthFile, "compat-auth-file", "", "path of a Docker-compatible config file to update instead")
fs.BoolVarP(&flags.All, "all", "a", false, "Remove the cached credentials for all registries in the auth file")
return &fs
}
@@ -77,5 +82,6 @@ func GetLogoutFlags(flags *LogoutOptions) *pflag.FlagSet {
func GetLogoutFlagsCompletions() completion.FlagCompletions {
flagCompletion := completion.FlagCompletions{}
flagCompletion["authfile"] = completion.AutocompleteDefault
flagCompletion["compat-auth-file"] = completion.AutocompleteDefault
return flagCompletion
}
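
For context, a caller would wire these flag sets up roughly as below. This is a hedged sketch, not code from this commit: the mytool invocation is hypothetical, and passing nil for the SystemContext relies on systemContextWithOptions tolerating a nil sys, as shown above.

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/containers/common/pkg/auth"
)

func main() {
	opts := auth.LoginOptions{Stdin: os.Stdin, Stdout: os.Stdout}
	fs := auth.GetLoginFlags(&opts) // defines --authfile, --compat-auth-file, --cert-dir, ...
	if err := fs.Parse(os.Args[1:]); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// e.g.: mytool --compat-auth-file ~/.docker/config.json quay.io
	// Setting both --authfile and --compat-auth-file fails in systemContextWithOptions.
	if err := auth.Login(context.Background(), nil, &opts, fs.Args()); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}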


@@ -284,11 +284,24 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf
}
}
if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
if d.uploadedCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
// HACK: Don't record zstd:chunked algorithms.
// There is already a similar hack in internal/imagedestination/impl/helpers.BlobMatchesRequiredCompression,
// and that one prevents reusing zstd:chunked blobs, so recording the algorithm here would be mostly harmless.
//
// We skip that here anyway to work around the inability of blobPipelineDetectCompressionStep to differentiate
// between zstd and zstd:chunked; so we could, in varying situations over time, call RecordDigestCompressorName
// with the same digest and both ZstdAlgorithmName and ZstdChunkedAlgorithmName, which causes warnings about
// inconsistent data to be logged.
c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName)
}
}
if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest &&
d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression {
c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
if d.srcCompressorName != compressiontypes.ZstdChunkedAlgorithmName {
// HACK: Don't record zstd:chunked algorithms, see above.
c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName)
}
}
return nil
}
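
The inconsistency these guards avoid can be sketched with a toy stand-in for the blob info cache (hypothetical types; the real RecordDigestCompressorName sits behind internal interfaces):

package main

import "fmt"

type toyCache map[string]string // digest -> compressor name

func (c toyCache) RecordDigestCompressorName(digest, name string) {
	if prev, ok := c[digest]; ok && prev != name {
		fmt.Printf("warning: inconsistent compressors for %s: %q vs %q\n", digest, prev, name)
		return
	}
	c[digest] = name
}

func main() {
	c := toyCache{}
	// blobPipelineDetectCompressionStep cannot tell zstd from zstd:chunked,
	// so over time both names could be recorded for the same digest:
	c.RecordDigestCompressorName("sha256:1234", "zstd")
	c.RecordDigestCompressorName("sha256:1234", "zstd:chunked") // would warn
}
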


@@ -70,7 +70,7 @@ func (d *bpDecryptionStepData) updateCryptoOperation(operation *types.LayerCrypt
}
}
// bpdData contains data that the copy pipeline needs about the encryption step.
// bpEncryptionStepData contains data that the copy pipeline needs about the encryption step.
type bpEncryptionStepData struct {
encrypting bool // We are actually encrypting the stream
finalizer ocicrypt.EncryptLayerFinalizer


@@ -340,7 +340,7 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte,
if err != nil {
return nil, err
}
sigs = append(sigs, newSigs...)
sigs = append(slices.Clone(sigs), newSigs...)
c.Printf("Storing list signatures\n")
if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil {
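
The slices.Clone added here (and in copySingleImage and mergeErrors below) guards against append writing through spare capacity into a backing array that the producer of sigs may still hold. A minimal standalone demonstration of that hazard, assuming only golang.org/x/exp/slices from go.mod above:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	backing := make([]string, 2, 4)
	backing[0], backing[1] = "sigA", "sigB"
	sigs := backing[:1] // len 1, but spare capacity reaches into backing[1]

	_ = append(sigs, "sigNew") // reuses backing: backing[1] is overwritten
	fmt.Println(backing[1])    // "sigNew" - "sigB" was silently clobbered

	backing[1] = "sigB"
	_ = append(slices.Clone(sigs), "sigNew") // copies first: backing untouched
	fmt.Println(backing[1])                  // still "sigB"
}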


@@ -277,7 +277,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
if err != nil {
return copySingleImageResult{}, err
}
sigs = append(sigs, newSigs...)
sigs = append(slices.Clone(sigs), newSigs...)
if len(sigs) > 0 {
c.Printf("Storing signatures\n")
@@ -380,8 +380,9 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context,
compressionAlgos := set.New[string]()
for _, srcInfo := range ic.src.LayerInfos() {
compression := compressionAlgorithmFromMIMEType(srcInfo)
compressionAlgos.Add(compression.Name())
if c := compressionAlgorithmFromMIMEType(srcInfo); c != nil {
compressionAlgos.Add(c.Name())
}
}
algos, err := algorithmsByNames(compressionAlgos.Values())
@@ -743,7 +744,9 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
if err == nil {
if srcInfo.Size != -1 {
bar.SetRefill(srcInfo.Size - bar.Current())
refill := srcInfo.Size - bar.Current()
bar.SetCurrent(srcInfo.Size)
bar.SetRefill(refill)
}
bar.mark100PercentComplete()
hideProgressBar = false


@@ -2,6 +2,7 @@ package daemon
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
@@ -85,12 +86,40 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe
}
}()
err = imageLoad(ctx, c, reader)
}
// imageLoad accepts a tar stream on reader and sends it to c
func imageLoad(ctx context.Context, c *client.Client, reader *io.PipeReader) error {
resp, err := c.ImageLoad(ctx, reader, true)
if err != nil {
err = fmt.Errorf("saving image to docker engine: %w", err)
return
return fmt.Errorf("starting a load operation in docker engine: %w", err)
}
defer resp.Body.Close()
// jsonError and jsonMessage are small subsets of docker/docker/pkg/jsonmessage.JSONError and JSONMessage,
// copied here to minimize dependencies.
type jsonError struct {
Message string `json:"message,omitempty"`
}
type jsonMessage struct {
Error *jsonError `json:"errorDetail,omitempty"`
}
dec := json.NewDecoder(resp.Body)
for {
var msg jsonMessage
if err := dec.Decode(&msg); err != nil {
if err == io.EOF {
break
}
return fmt.Errorf("parsing docker load progress: %w", err)
}
if msg.Error != nil {
return fmt.Errorf("docker engine reported: %s", msg.Error.Message)
}
}
return nil // No error reported = success
}
// DesiredLayerCompression indicates if layers must be compressed, decompressed or preserved
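
Concretely, the loop added in imageLoad consumes a stream of newline-separated JSON messages from the engine's /images/load endpoint, where an errorDetail object signals failure even under an HTTP 200. A standalone sketch against a canned body (the sample messages are hypothetical):

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Canned stand-in for resp.Body; real bodies come from /images/load.
	body := strings.NewReader(
		`{"stream":"Loaded image: example.com/app:latest\n"}` + "\n" +
			`{"errorDetail":{"message":"invalid tar header"}}` + "\n")

	type jsonError struct {
		Message string `json:"message,omitempty"`
	}
	type jsonMessage struct {
		Error *jsonError `json:"errorDetail,omitempty"`
	}

	dec := json.NewDecoder(body)
	for {
		var msg jsonMessage
		if err := dec.Decode(&msg); err != nil {
			if err == io.EOF {
				break
			}
			fmt.Println("parsing docker load progress:", err)
			return
		}
		if msg.Error != nil {
			fmt.Println("docker engine reported:", msg.Error.Message)
			return
		}
	}
	fmt.Println("success")
}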


@@ -24,6 +24,7 @@ import (
"github.com/docker/distribution/registry/api/errcode"
dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
"golang.org/x/exp/slices"
)
// errNoErrorsInBody is returned when an HTTP response body parses to an empty
@@ -105,7 +106,7 @@ func makeErrorList(err error) []error {
}
func mergeErrors(err1, err2 error) error {
return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
return errcode.Errors(append(slices.Clone(makeErrorList(err1)), makeErrorList(err2)...))
}
// handleErrorResponse returns error parsed from HTTP response for an


@@ -363,6 +363,11 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
hostname := registry
if registry == dockerHostname {
hostname = dockerV1Hostname
// A search term of library/foo does not find the library/foo image on the docker.io servers,
// which is surprising - but Docker itself modifies the search term client-side in this same way,
// so it seems convenient to do the same thing.
// Read more here: https://github.com/containers/image/pull/2133#issue-1928524334
image = strings.TrimPrefix(image, "library/")
}
client, err := newDockerClient(sys, hostname, registry)
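
The rewrite only affects terms that literally start with the library/ prefix; a quick standalone check:

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, term := range []string{"library/alpine", "alpine", "myorg/app"} {
		fmt.Println(term, "->", strings.TrimPrefix(term, "library/"))
	}
	// library/alpine -> alpine
	// alpine -> alpine
	// myorg/app -> myorg/app
}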


@@ -137,7 +137,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
// If requested, precompute the blob digest to prevent uploading layers that already exist on the registry.
// This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests,
// the source blob is uncompressed, and the destination blob is being compressed "on the fly".
if inputInfo.Digest == "" && d.c.sys.DockerRegistryPushPrecomputeDigests {
if inputInfo.Digest == "" && d.c.sys != nil && d.c.sys.DockerRegistryPushPrecomputeDigests {
logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref))
streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo)
if err != nil {
@ -341,39 +341,58 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
// Then try reusing blobs from other locations.
candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, options.CanSubstitute)
for _, candidate := range candidates {
candidateRepo, err := parseBICLocationReference(candidate.Location)
if err != nil {
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
continue
}
var err error
compressionOperation, compressionAlgorithm, err := blobinfocache.OperationAndAlgorithmForCompressor(candidate.CompressorName)
if err != nil {
logrus.Debugf("OperationAndAlgorithmForCompressor Failed: %v", err)
continue
}
var candidateRepo reference.Named
if !candidate.UnknownLocation {
candidateRepo, err = parseBICLocationReference(candidate.Location)
if err != nil {
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
continue
}
}
if !impl.BlobMatchesRequiredCompression(options, compressionAlgorithm) {
requiredCompression := "nil"
if compressionAlgorithm != nil {
requiredCompression = compressionAlgorithm.Name()
}
logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name())
if !candidate.UnknownLocation {
logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) in %s", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression, candidateRepo.Name())
} else {
logrus.Debugf("Ignoring candidate blob %s as reuse candidate due to compression mismatch ( %s vs %s ) with no location match, checking current repo", candidate.Digest.String(), options.RequiredCompression.Name(), requiredCompression)
}
continue
}
if candidate.CompressorName != blobinfocache.Uncompressed {
logrus.Debugf("Trying to reuse cached location %s compressed with %s in %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
if !candidate.UnknownLocation {
if candidate.CompressorName != blobinfocache.Uncompressed {
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressorName, candidateRepo.Name())
} else {
logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo %s", candidate.Digest.String(), candidateRepo.Name())
}
// Sanity checks:
if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
// OCI distribution spec 1.1 allows mounting blobs without specifying the source repo
// (the "from" parameter); in that case we might try to use these candidates as well.
//
// OTOH that would mean we can’t do the “blobExists” check, and if there is no match
// we could get an upload request that we would have to cancel.
logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
continue
}
} else {
logrus.Debugf("Trying to reuse cached location %s with no compression in %s", candidate.Digest.String(), candidateRepo.Name())
}
// Sanity checks:
if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
// OCI distribution spec 1.1 allows mounting blobs without specifying the source repo
// (the "from" parameter); in that case we might try to use these candidates as well.
//
// OTOH that would mean we can’t do the “blobExists” check, and if there is no match
// we could get an upload request that we would have to cancel.
logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
continue
if candidate.CompressorName != blobinfocache.Uncompressed {
logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressorName)
} else {
logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo with no location match, checking current repo", candidate.Digest.String())
}
// This digest is a known variant of this blob but we don’t
// have a recorded location in this registry, let’s try looking
// for it in the current repo.
candidateRepo = reference.TrimNamed(d.ref.ref)
}
if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
logrus.Debug("... Already tried the primary destination")
@ -688,6 +707,10 @@ func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.
}
}
// To make sure we can safely append to the slices of ociManifest, without adding a remote dependency on the code that creates it.
ociManifest.Layers = slices.Clone(ociManifest.Layers)
// We don’t need to ^^^ for ociConfig.RootFS.DiffIDs because we have created it empty ourselves, and json.Unmarshal is documented to append() to
// the slice in the original object (or in a newly allocated object).
for _, sig := range signatures {
mimeType := sig.UntrustedMIMEType()
payloadBlob := sig.UntrustedPayload()

View File

@ -88,7 +88,7 @@ func registryHTTPResponseToError(res *http.Response) error {
response = response[:50] + "..."
}
// %.0w makes e visible to error.Unwrap() without including any text
err = fmt.Errorf("StatusCode: %d, %s%.0w", e.StatusCode, response, e)
err = fmt.Errorf("StatusCode: %d, %q%.0w", e.StatusCode, response, e)
case errcode.Error:
// e.Error() is fmt.Sprintf("%s: %s", e.Code.Error(), e.Message, which is usually
// rather redundant. So reword it without using e.Code.Error() if e.Message is the default.
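
The %.0w verb in the hunk above is doing real work: a precision of 0 suppresses the wrapped error's text while keeping it on the Unwrap chain, exactly as the comment says. A quick self-contained check (errUnauthorized is a stand-in sentinel):

package main

import (
	"errors"
	"fmt"
)

var errUnauthorized = errors.New("unauthorized")

func main() {
	response := "authentication required"
	err := fmt.Errorf("StatusCode: %d, %q%.0w", 401, response, errUnauthorized)
	fmt.Println(err)                             // StatusCode: 401, "authentication required"
	fmt.Println(errors.Is(err, errUnauthorized)) // true: the wrapped error adds no text
}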

View File

@ -2,6 +2,8 @@ package image
import (
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/unparsedimage"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
)
@ -17,3 +19,23 @@ type UnparsedImage = image.UnparsedImage
func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
return image.UnparsedInstance(src, instanceDigest)
}
// unparsedWithRef wraps a private.UnparsedImage, claiming another replacementRef
type unparsedWithRef struct {
private.UnparsedImage
ref types.ImageReference
}
func (uwr *unparsedWithRef) Reference() types.ImageReference {
return uwr.ref
}
// UnparsedInstanceWithReference returns a types.UnparsedImage for wrappedInstance which claims to be a replacementRef.
// This is useful for combining image data with other reference values, e.g. to check signatures on a locally-pulled image
// based on a remote-registry policy.
func UnparsedInstanceWithReference(wrappedInstance types.UnparsedImage, replacementRef types.ImageReference) types.UnparsedImage {
return &unparsedWithRef{
UnparsedImage: unparsedimage.FromPublic(wrappedInstance),
ref: replacementRef,
}
}
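
A sketch of the use case the comment describes, assuming src was opened from a locally-pulled copy and remoteRef is the docker:// reference whose policy should apply; pc.IsRunningImageAllowed is the existing public entry point in the signature package:

package policycheck

import (
	"context"

	"github.com/containers/image/v5/image"
	"github.com/containers/image/v5/signature"
	"github.com/containers/image/v5/types"
)

// checkLocalAgainstRemotePolicy evaluates the policy for remoteRef against the
// manifest and signatures readable from src (e.g. a containers-storage: copy).
func checkLocalAgainstRemotePolicy(ctx context.Context, pc *signature.PolicyContext,
	src types.ImageSource, remoteRef types.ImageReference) (bool, error) {
	unparsed := image.UnparsedInstanceWithReference(image.UnparsedInstance(src, nil), remoteRef)
	return pc.IsRunningImageAllowed(ctx, unparsed)
}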

View File

@ -32,7 +32,7 @@ type BlobInfoCache2 interface {
// otherwise the cache could be poisoned and cause us to make incorrect edits to type
// information in a manifest.
RecordDigestCompressorName(anyDigest digest.Digest, compressorName string)
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations
// CandidateLocations2 returns a prioritized, limited number of blobs and their locations (if known)
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
//
@ -46,7 +46,8 @@ type BlobInfoCache2 interface {
// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
type BICReplacementCandidate2 struct {
Digest digest.Digest
CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
Location types.BICLocationReference
Digest digest.Digest
CompressorName string // either the Name() of a known pkg/compression.Algorithm, or Uncompressed or UnknownCompression
UnknownLocation bool // is true when `Location` for this blob is not set
Location types.BICLocationReference // not set if UnknownLocation is set to `true`
}
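
Since BICReplacementCandidate2 lives in an internal package, here is a local mirror of the relevant fields, only to sketch how a consumer is expected to branch on the new UnknownLocation flag (digests and locations are invented; the docker destination hunk above is the real consumer):

package main

import "fmt"

// candidate mirrors the shape of blobinfocache.BICReplacementCandidate2 for illustration.
type candidate struct {
	Digest          string
	CompressorName  string
	UnknownLocation bool
	Location        string
}

func main() {
	for _, c := range []candidate{
		{Digest: "sha256:aaa", CompressorName: "gzip", Location: "library/busybox"},
		{Digest: "sha256:bbb", CompressorName: "zstd", UnknownLocation: true},
	} {
		if c.UnknownLocation {
			fmt.Printf("%s: no recorded location, probe the current/destination repo\n", c.Digest)
			continue
		}
		fmt.Printf("%s: try recorded location %q first\n", c.Digest, c.Location)
	}
}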

View File

@ -196,14 +196,12 @@ func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, opti
return m.convertToManifestSchema2(ctx, options)
}
// prepareLayerDecryptEditsIfNecessary checks if options requires layer decryptions.
// layerEditsOfOCIOnlyFeatures checks if options requires some layer edits to be done before converting to a Docker format.
// If not, it returns (nil, nil).
// If some edits are required, it returns a set of edits to provide to OCI1.UpdateLayerInfos,
// and updates *options so that those edits are not attempted again later.
func (m *manifestOCI1) prepareLayerDecryptEditsIfNecessary(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) {
if options == nil || !slices.ContainsFunc(options.LayerInfos, func(info types.BlobInfo) bool {
return info.CryptoOperation == types.Decrypt
}) {
func (m *manifestOCI1) layerEditsOfOCIOnlyFeatures(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) {
if options == nil || options.LayerInfos == nil {
return nil, nil
}
@ -212,19 +210,35 @@ func (m *manifestOCI1) prepareLayerDecryptEditsIfNecessary(options *types.Manife
return nil, fmt.Errorf("preparing to decrypt before conversion: %d layers vs. %d layer edits", len(originalInfos), len(options.LayerInfos))
}
res := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionaly deviate.
updatedEdits := slices.Clone(options.LayerInfos)
for i, info := range options.LayerInfos {
if info.CryptoOperation == types.Decrypt {
res[i].CryptoOperation = types.Decrypt
updatedEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail.
ociOnlyEdits := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionally deviate.
laterEdits := slices.Clone(options.LayerInfos)
needsOCIOnlyEdits := false
for i, edit := range options.LayerInfos {
// Unless determined otherwise, don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit.
ociOnlyEdits[i].CompressionOperation = types.PreserveOriginal
ociOnlyEdits[i].CompressionAlgorithm = nil
if edit.CryptoOperation == types.Decrypt {
needsOCIOnlyEdits = true // Encrypted types must be removed before conversion because they can’t be represented in Docker schemas
ociOnlyEdits[i].CryptoOperation = types.Decrypt
laterEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail.
}
if originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerZstd ||
originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerNonDistributableZstd { //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
needsOCIOnlyEdits = true // Zstd MIME types must be removed before conversion because they can’t be represented in Docker schemas.
ociOnlyEdits[i].CompressionOperation = edit.CompressionOperation
ociOnlyEdits[i].CompressionAlgorithm = edit.CompressionAlgorithm
laterEdits[i].CompressionOperation = types.PreserveOriginal
laterEdits[i].CompressionAlgorithm = nil
}
// Don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit.
res[i].CompressionOperation = types.PreserveOriginal
res[i].CompressionAlgorithm = nil
}
options.LayerInfos = updatedEdits
return res, nil
if !needsOCIOnlyEdits {
return nil, nil
}
options.LayerInfos = laterEdits
return ociOnlyEdits, nil
}
// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
@ -238,15 +252,15 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *type
// Mostly we first make a format conversion, and _afterwards_ do layer edits. But first we need to do the layer edits
// which remove OCI-specific features, because trying to convert those layers would fail.
// So, do the layer updates for decryption.
// So, do the layer updates for decryption, and for conversions from Zstd.
ociManifest := m.m
layerDecryptEdits, err := m.prepareLayerDecryptEditsIfNecessary(options)
ociOnlyEdits, err := m.layerEditsOfOCIOnlyFeatures(options)
if err != nil {
return nil, err
}
if layerDecryptEdits != nil {
if ociOnlyEdits != nil {
ociManifest = manifest.OCI1Clone(ociManifest)
if err := ociManifest.UpdateLayerInfos(layerDecryptEdits); err != nil {
if err := ociManifest.UpdateLayerInfos(ociOnlyEdits); err != nil {
return nil, err
}
}
@ -275,9 +289,8 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *type
layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
case imgspecv1.MediaTypeImageLayerZstd:
return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType)
// FIXME: s/Zsdt/Zstd/ after ocicrypt with https://github.com/containers/ocicrypt/pull/91 is released
case ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc, ociencspec.MediaTypeLayerZstdEnc,
ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZsdtEnc:
ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZstdEnc:
return nil, fmt.Errorf("during manifest conversion: encrypted layers (%q) are not supported in docker images", layers[idx].MediaType)
default:
return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType)

View File

@ -12,6 +12,11 @@ func BlobMatchesRequiredCompression(options private.TryReusingBlobOptions, candi
if options.RequiredCompression == nil {
return true // no requirement imposed
}
if options.RequiredCompression.Name() == compression.ZstdChunkedAlgorithmName {
// HACK: Never match when the caller asks for zstd:chunked, because we don’t record the annotations required to use the chunked blobs.
// The caller must re-compress to build those annotations.
return false
}
return candidateCompression != nil && (options.RequiredCompression.Name() == candidateCompression.Name())
}

View File

@ -133,7 +133,9 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
}
}
if len(addedEntries) != 0 {
index.Manifests = append(index.Manifests, addedEntries...)
// slices.Clone() here to ensure a private backing array;
// an external caller could have manually created Schema2ListPublic with a slice with extra capacity.
index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
}
return nil
}

View File

@ -167,7 +167,9 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error {
}
}
if len(addedEntries) != 0 {
index.Manifests = append(index.Manifests, addedEntries...)
// slices.Clone() here to ensure the slice uses a private backing array;
// an external caller could have manually created OCI1IndexPublic with a slice with extra capacity.
index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
}
if len(addedEntries) != 0 || updatedAnnotations {
slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) int {
@ -220,7 +222,7 @@ func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip
case ic.manifestPosition != other.manifestPosition:
return ic.manifestPosition < other.manifestPosition
}
panic("internal error: invalid comparision between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition.
panic("internal error: invalid comparison between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition.
}
// chooseInstance is a private equivalent to ChooseInstanceByCompression,

View File

@ -28,6 +28,18 @@ func (e ImageNotFoundError) Error() string {
return fmt.Sprintf("no descriptor found for reference %q", e.ref.image)
}
// ArchiveFileNotFoundError occurs when the archive file does not exist.
type ArchiveFileNotFoundError struct {
// ref is the image reference
ref ociArchiveReference
// path is the file path that was not present
path string
}
func (e ArchiveFileNotFoundError) Error() string {
return fmt.Sprintf("archive file not found: %q", e.path)
}
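
A sketch of how a caller can now tell a missing archive apart from other failures, using errors.As with the new exported type (openArchive and its arguments are illustrative):

package main

import (
	"context"
	"errors"
	"fmt"

	ociarchive "github.com/containers/image/v5/oci/archive"
	"github.com/containers/image/v5/types"
)

// openArchive opens ref (an oci-archive: reference) and distinguishes a missing
// archive file from other errors.
func openArchive(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) error {
	src, err := ref.NewImageSource(ctx, sys)
	if err != nil {
		var notFound ociarchive.ArchiveFileNotFoundError
		if errors.As(err, &notFound) {
			return fmt.Errorf("archive does not exist yet: %w", err)
		}
		return err
	}
	defer src.Close()
	return nil
}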
type ociArchiveImageSource struct {
impl.Compat

View File

@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"io/fs"
"os"
"strings"
@ -171,18 +172,24 @@ func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error)
// creates the temporary directory and copies the tarred content to it
func createUntarTempDir(sys *types.SystemContext, ref ociArchiveReference) (tempDirOCIRef, error) {
src := ref.resolvedFile
arch, err := os.Open(src)
if err != nil {
if errors.Is(err, fs.ErrNotExist) {
return tempDirOCIRef{}, ArchiveFileNotFoundError{ref: ref, path: src}
} else {
return tempDirOCIRef{}, err
}
}
defer arch.Close()
tempDirRef, err := createOCIRef(sys, ref.image)
if err != nil {
return tempDirOCIRef{}, fmt.Errorf("creating oci reference: %w", err)
}
src := ref.resolvedFile
dst := tempDirRef.tempDirectory
// TODO: This can take quite some time, and should ideally be cancellable using a context.Context.
arch, err := os.Open(src)
if err != nil {
return tempDirOCIRef{}, err
}
defer arch.Close()
if err := archive.NewDefaultArchiver().Untar(arch, dst, &archive.TarOptions{NoLchown: true}); err != nil {
if err := tempDirRef.deleteTempDir(); err != nil {
return tempDirOCIRef{}, fmt.Errorf("deleting temp directory %q: %w", tempDirRef.tempDirectory, err)

View File

@ -0,0 +1,240 @@
package layout
import (
"context"
"encoding/json"
"fmt"
"io/fs"
"os"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// DeleteImage deletes the named image from the directory, if supported.
func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
sharedBlobsDir := ""
if sys != nil && sys.OCISharedBlobDirPath != "" {
sharedBlobsDir = sys.OCISharedBlobDirPath
}
descriptor, descriptorIndex, err := ref.getManifestDescriptor()
if err != nil {
return err
}
var blobsUsedByImage map[digest.Digest]int
switch descriptor.MediaType {
case imgspecv1.MediaTypeImageManifest:
blobsUsedByImage, err = ref.getBlobsUsedInSingleImage(&descriptor, sharedBlobsDir)
case imgspecv1.MediaTypeImageIndex:
blobsUsedByImage, err = ref.getBlobsUsedInImageIndex(&descriptor, sharedBlobsDir)
default:
return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
}
if err != nil {
return err
}
blobsToDelete, err := ref.getBlobsToDelete(blobsUsedByImage, sharedBlobsDir)
if err != nil {
return err
}
err = ref.deleteBlobs(blobsToDelete)
if err != nil {
return err
}
return ref.deleteReferenceFromIndex(descriptorIndex)
}
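
End-to-end, the new deletion support is reached through the ordinary transport API; a sketch, assuming dir is an OCI layout directory and imageName matches the manifest's org.opencontainers.image.ref.name annotation (empty if the layout holds exactly one image):

package main

import (
	"context"

	"github.com/containers/image/v5/oci/layout"
	"github.com/containers/image/v5/types"
)

// deleteFromLayout removes one image, and any blobs only it uses, from an OCI layout.
func deleteFromLayout(ctx context.Context, sys *types.SystemContext, dir, imageName string) error {
	ref, err := layout.NewReference(dir, imageName)
	if err != nil {
		return err
	}
	return ref.DeleteImage(ctx, sys)
}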
func (ref ociReference) getBlobsUsedInSingleImage(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) {
manifest, err := ref.getManifest(descriptor, sharedBlobsDir)
if err != nil {
return nil, err
}
blobsUsedInManifest := ref.getBlobsUsedInManifest(manifest)
blobsUsedInManifest[descriptor.Digest]++ // Add the current manifest to the list of blobs used by this reference
return blobsUsedInManifest, nil
}
func (ref ociReference) getBlobsUsedInImageIndex(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (map[digest.Digest]int, error) {
blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
if err != nil {
return nil, err
}
index, err := parseIndex(blobPath)
if err != nil {
return nil, err
}
blobsUsedInImageRefIndex := make(map[digest.Digest]int)
err = ref.addBlobsUsedInIndex(blobsUsedInImageRefIndex, index, sharedBlobsDir)
if err != nil {
return nil, err
}
blobsUsedInImageRefIndex[descriptor.Digest]++ // Add the nested index in the list of blobs used by this reference
return blobsUsedInImageRefIndex, nil
}
// Updates a map of digests with their usage counts, so a blob that is referenced three times will have 3 in the map
func (ref ociReference) addBlobsUsedInIndex(destination map[digest.Digest]int, index *imgspecv1.Index, sharedBlobsDir string) error {
for _, descriptor := range index.Manifests {
destination[descriptor.Digest]++
switch descriptor.MediaType {
case imgspecv1.MediaTypeImageManifest:
manifest, err := ref.getManifest(&descriptor, sharedBlobsDir)
if err != nil {
return err
}
for digest, count := range ref.getBlobsUsedInManifest(manifest) {
destination[digest] += count
}
case imgspecv1.MediaTypeImageIndex:
blobPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
if err != nil {
return err
}
index, err := parseIndex(blobPath)
if err != nil {
return err
}
err = ref.addBlobsUsedInIndex(destination, index, sharedBlobsDir)
if err != nil {
return err
}
default:
return fmt.Errorf("unsupported mediaType in index: %q", descriptor.MediaType)
}
}
return nil
}
func (ref ociReference) getBlobsUsedInManifest(manifest *imgspecv1.Manifest) map[digest.Digest]int {
blobsUsedInManifest := make(map[digest.Digest]int, 0)
blobsUsedInManifest[manifest.Config.Digest]++
for _, layer := range manifest.Layers {
blobsUsedInManifest[layer.Digest]++
}
return blobsUsedInManifest
}
// This takes in a map of digests and their usage counts in the manifest to be deleted.
// It compares them against the digest usage in the root index, and returns a set of the blobs that can be safely deleted
func (ref ociReference) getBlobsToDelete(blobsUsedByDescriptorToDelete map[digest.Digest]int, sharedBlobsDir string) (*set.Set[digest.Digest], error) {
rootIndex, err := ref.getIndex()
if err != nil {
return nil, err
}
blobsUsedInRootIndex := make(map[digest.Digest]int)
err = ref.addBlobsUsedInIndex(blobsUsedInRootIndex, rootIndex, sharedBlobsDir)
if err != nil {
return nil, err
}
blobsToDelete := set.New[digest.Digest]()
for digest, count := range blobsUsedInRootIndex {
if count-blobsUsedByDescriptorToDelete[digest] == 0 {
blobsToDelete.Add(digest)
}
}
return blobsToDelete, nil
}
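
A self-contained illustration of the counting rule: a blob survives unless the image being deleted accounts for every one of its uses in the root index (digests shortened for readability):

package main

import "fmt"

func main() {
	usedInRootIndex := map[string]int{"sha256:cfg": 1, "sha256:layer": 2}
	usedByImageToDelete := map[string]int{"sha256:cfg": 1, "sha256:layer": 1}
	for digest, total := range usedInRootIndex {
		if total-usedByImageToDelete[digest] == 0 {
			fmt.Println("delete", digest) // only sha256:cfg; sha256:layer is shared with another image
		}
	}
}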
// This transport never generates layouts where blobs for an image are both in the local blobs directory
// and the shared one; it’s either one or the other, depending on how OCISharedBlobDirPath is set.
//
// But we can’t correctly compute use counts for OCISharedBlobDirPath (because we don't know what
// the other layouts sharing that directory are, and we might not even have permission to read them),
// so we can’t really delete any blobs in that case.
// Checking the _local_ blobs directory, and deleting blobs from there, doesn't really hurt,
// in case the layout was created using some other tool or without OCISharedBlobDirPath set, so let's silently
// check for local blobs (but we should make no noise if the blobs are actually in the shared directory).
//
// So, NOTE: the blobPath() call below hard-codes "" even in calls where OCISharedBlobDirPath is set
func (ref ociReference) deleteBlobs(blobsToDelete *set.Set[digest.Digest]) error {
for _, digest := range blobsToDelete.Values() {
blobPath, err := ref.blobPath(digest, "") // Only delete in the local directory, see comment above
if err != nil {
return err
}
err = deleteBlob(blobPath)
if err != nil {
return err
}
}
return nil
}
func deleteBlob(blobPath string) error {
logrus.Debugf("Deleting blob at %q", blobPath)
err := os.Remove(blobPath)
if err != nil && !os.IsNotExist(err) {
return err
} else {
return nil
}
}
func (ref ociReference) deleteReferenceFromIndex(referenceIndex int) error {
index, err := ref.getIndex()
if err != nil {
return err
}
index.Manifests = slices.Delete(index.Manifests, referenceIndex, referenceIndex+1)
return saveJSON(ref.indexPath(), index)
}
func saveJSON(path string, content any) error {
// If the file already exists, get its mode to preserve it
var mode fs.FileMode
existingfi, err := os.Stat(path)
if err != nil {
if !os.IsNotExist(err) {
return err
} else { // File does not exist, use default mode
mode = 0644
}
} else {
mode = existingfi.Mode()
}
file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode)
if err != nil {
return err
}
defer file.Close()
return json.NewEncoder(file).Encode(content)
}
func (ref ociReference) getManifest(descriptor *imgspecv1.Descriptor, sharedBlobsDir string) (*imgspecv1.Manifest, error) {
manifestPath, err := ref.blobPath(descriptor.Digest, sharedBlobsDir)
if err != nil {
return nil, err
}
manifest, err := parseJSON[imgspecv1.Manifest](manifestPath)
if err != nil {
return nil, err
}
return manifest, nil
}

View File

@ -19,6 +19,7 @@ import (
digest "github.com/opencontainers/go-digest"
imgspec "github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/exp/slices"
)
type ociImageDestination struct {
@ -84,7 +85,7 @@ func newImageDestination(sys *types.SystemContext, ref ociReference) (private.Im
// Per the OCI image specification, layouts MUST have a "blobs" subdirectory,
// but it MAY be empty (e.g. if we never end up calling PutBlob)
// https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19
if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil {
if err := ensureDirectoryExists(filepath.Join(d.ref.dir, imgspecv1.ImageBlobsDir)); err != nil {
return nil, err
}
return d, nil
@ -271,8 +272,8 @@ func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) {
return
}
}
// It's a new entry to be added to the index.
d.index.Manifests = append(d.index.Manifests, *desc)
// It's a new entry to be added to the index. Use slices.Clone() to avoid a remote dependency on how d.index was created.
d.index.Manifests = append(slices.Clone(d.index.Manifests), *desc)
}
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
@ -283,7 +284,13 @@ func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) {
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (d *ociImageDestination) Commit(context.Context, types.UnparsedImage) error {
if err := os.WriteFile(d.ref.ociLayoutPath(), []byte(`{"imageLayoutVersion": "1.0.0"}`), 0644); err != nil {
layoutBytes, err := json.Marshal(imgspecv1.ImageLayout{
Version: imgspecv1.ImageLayoutVersion,
})
if err != nil {
return err
}
if err := os.WriteFile(d.ref.ociLayoutPath(), layoutBytes, 0644); err != nil {
return err
}
indexJSON, err := json.Marshal(d.index)

View File

@ -60,7 +60,7 @@ func newImageSource(sys *types.SystemContext, ref ociReference) (private.ImageSo
client := &http.Client{}
client.Transport = tr
descriptor, err := ref.getManifestDescriptor()
descriptor, _, err := ref.getManifestDescriptor()
if err != nil {
return nil, err
}

View File

@ -160,48 +160,56 @@ func (ref ociReference) NewImage(ctx context.Context, sys *types.SystemContext)
// getIndex returns a pointer to the index referenced by this ociReference. If an error occurs opening the index, nil is returned together
// with the error.
func (ref ociReference) getIndex() (*imgspecv1.Index, error) {
indexJSON, err := os.Open(ref.indexPath())
if err != nil {
return nil, err
}
defer indexJSON.Close()
index := &imgspecv1.Index{}
if err := json.NewDecoder(indexJSON).Decode(index); err != nil {
return nil, err
}
return index, nil
return parseIndex(ref.indexPath())
}
func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) {
func parseIndex(path string) (*imgspecv1.Index, error) {
return parseJSON[imgspecv1.Index](path)
}
func parseJSON[T any](path string) (*T, error) {
content, err := os.Open(path)
if err != nil {
return nil, err
}
defer content.Close()
obj := new(T)
if err := json.NewDecoder(content).Decode(obj); err != nil {
return nil, err
}
return obj, nil
}
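
The generic helper keeps one decoder for every schema read from the layout; a standalone mirror of it, showing how the type parameter selects the target type (the layout path is hypothetical):

package main

import (
	"encoding/json"
	"fmt"
	"os"

	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// parseJSON mirrors the unexported helper above: one generic decoder for any schema type.
func parseJSON[T any](path string) (*T, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	obj := new(T)
	if err := json.NewDecoder(f).Decode(obj); err != nil {
		return nil, err
	}
	return obj, nil
}

func main() {
	// The type parameter picks the target schema: Index here, Manifest elsewhere.
	if index, err := parseJSON[imgspecv1.Index]("layout/index.json"); err == nil {
		fmt.Println("manifests in index:", len(index.Manifests))
	}
}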
func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, error) {
index, err := ref.getIndex()
if err != nil {
return imgspecv1.Descriptor{}, err
return imgspecv1.Descriptor{}, -1, err
}
if ref.image == "" {
// return manifest if only one image is in the oci directory
if len(index.Manifests) != 1 {
// ask the user to choose an image when more than one image is in the oci directory
return imgspecv1.Descriptor{}, ErrMoreThanOneImage
return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage
}
return index.Manifests[0], nil
return index.Manifests[0], 0, nil
} else {
// if image specified, look through all manifests for a match
var unsupportedMIMETypes []string
for _, md := range index.Manifests {
for i, md := range index.Manifests {
if refName, ok := md.Annotations[imgspecv1.AnnotationRefName]; ok && refName == ref.image {
if md.MediaType == imgspecv1.MediaTypeImageManifest || md.MediaType == imgspecv1.MediaTypeImageIndex {
return md, nil
return md, i, nil
}
unsupportedMIMETypes = append(unsupportedMIMETypes, md.MediaType)
}
}
if len(unsupportedMIMETypes) != 0 {
return imgspecv1.Descriptor{}, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes)
return imgspecv1.Descriptor{}, -1, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes)
}
}
return imgspecv1.Descriptor{}, ImageNotFoundError{ref}
return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref}
}
// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name
@ -211,7 +219,8 @@ func LoadManifestDescriptor(imgRef types.ImageReference) (imgspecv1.Descriptor,
if !ok {
return imgspecv1.Descriptor{}, errors.New("error typecasting, need type ociRef")
}
return ociRef.getManifestDescriptor()
md, _, err := ociRef.getManifestDescriptor()
return md, err
}
// NewImageSource returns a types.ImageSource for this reference.
@ -226,19 +235,14 @@ func (ref ociReference) NewImageDestination(ctx context.Context, sys *types.Syst
return newImageDestination(sys, ref)
}
// DeleteImage deletes the named image from the registry, if supported.
func (ref ociReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error {
return errors.New("Deleting images not implemented for oci: images")
}
// ociLayoutPath returns a path for the oci-layout within a directory using OCI conventions.
func (ref ociReference) ociLayoutPath() string {
return filepath.Join(ref.dir, "oci-layout")
return filepath.Join(ref.dir, imgspecv1.ImageLayoutFile)
}
// indexPath returns a path for the index.json within a directory using OCI conventions.
func (ref ociReference) indexPath() string {
return filepath.Join(ref.dir, "index.json")
return filepath.Join(ref.dir, imgspecv1.ImageIndexFile)
}
// blobPath returns a path for a blob within a directory using OCI image-layout conventions.
@ -246,9 +250,11 @@ func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (st
if err := digest.Validate(); err != nil {
return "", fmt.Errorf("unexpected digest reference %s: %w", digest, err)
}
blobDir := filepath.Join(ref.dir, "blobs")
var blobDir string
if sharedBlobDir != "" {
blobDir = sharedBlobDir
} else {
blobDir = filepath.Join(ref.dir, imgspecv1.ImageBlobsDir)
}
return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil
}

View File

@ -10,15 +10,20 @@ import (
"github.com/opencontainers/go-digest"
)
// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates,
// replacementAttempts is the number of blob replacement candidates with known location returned by destructivelyPrioritizeReplacementCandidates,
// and therefore ultimately by types.BlobInfoCache.CandidateLocations.
// This is a heuristic/guess, and could well use a different value.
const replacementAttempts = 5
// replacementUnknownLocationAttempts is the number of blob replacement candidates with unknown Location returned by destructivelyPrioritizeReplacementCandidates,
// and therefore ultimately by blobinfocache.BlobInfoCache2.CandidateLocations2.
// This is a heuristic/guess, and could well use a different value.
const replacementUnknownLocationAttempts = 2
// CandidateWithTime is the input to types.BICReplacementCandidate prioritization.
type CandidateWithTime struct {
Candidate blobinfocache.BICReplacementCandidate2 // The replacement candidate
LastSeen time.Time // Time the candidate was last known to exist (either read or written)
LastSeen time.Time // Time the candidate was last known to exist (either read or written) (not set for Candidate.UnknownLocation)
}
// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
@ -77,9 +82,22 @@ func (css *candidateSortState) Swap(i, j int) {
css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
}
// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
// number of entries to limit, only to make testing simpler.
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 {
func min(a, b int) int {
if a < b {
return a
}
return b
}
// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the
// number of entries to limit for known and unknown location separately, only to make testing simpler.
// TODO: the following function is no longer destructive in nature; the prioritized result is actually a copy of the original
// candidate set, so in the future we might want to rename this public API and remove the “destructively” prefix.
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 {
// Split the candidates into known-location and unknown-location ones,
// and limit them separately.
var knownLocationCandidates []CandidateWithTime
var unknownLocationCandidates []CandidateWithTime
// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
// compare equal.
// FIXME: Use slices.SortFunc after we update to Go 1.20 (Go 1.21?) and Time.Compare and cmp.Compare are available.
@ -88,24 +106,34 @@ func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime,
primaryDigest: primaryDigest,
uncompressedDigest: uncompressedDigest,
})
resLength := len(cs)
if resLength > maxCandidates {
resLength = maxCandidates
for _, candidate := range cs {
if candidate.Candidate.UnknownLocation {
unknownLocationCandidates = append(unknownLocationCandidates, candidate)
} else {
knownLocationCandidates = append(knownLocationCandidates, candidate)
}
}
res := make([]blobinfocache.BICReplacementCandidate2, resLength)
for i := range res {
res[i] = cs[i].Candidate
knownLocationCandidatesUsed := min(len(knownLocationCandidates), totalLimit)
remainingCapacity := totalLimit - knownLocationCandidatesUsed
unknownLocationCandidatesUsed := min(noLocationLimit, min(remainingCapacity, len(unknownLocationCandidates)))
res := make([]blobinfocache.BICReplacementCandidate2, knownLocationCandidatesUsed)
for i := 0; i < knownLocationCandidatesUsed; i++ {
res[i] = knownLocationCandidates[i].Candidate
}
// If candidates with unknown location are found, let’s add them to the final list
for i := 0; i < unknownLocationCandidatesUsed; i++ {
res = append(res, unknownLocationCandidates[i].Candidate)
}
return res
}
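
A worked example of the trimming above, using the package defaults (replacementAttempts=5, replacementUnknownLocationAttempts=2): unknown-location candidates only fill whatever capacity the known ones leave free.

package main

import "fmt"

func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	for _, c := range []struct{ known, unknown int }{{4, 3}, {6, 3}, {1, 5}} {
		knownUsed := min(c.known, 5)
		unknownUsed := min(2, min(5-knownUsed, c.unknown))
		fmt.Printf("known=%d unknown=%d -> returns %d known + %d unknown\n",
			c.known, c.unknown, knownUsed, unknownUsed)
	}
}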
// DestructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest), and returns an
// appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
//
// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
func DestructivelyPrioritizeReplacementCandidates(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []blobinfocache.BICReplacementCandidate2 {
return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts, replacementUnknownLocationAttempts)
}

View File

@ -133,24 +133,39 @@ func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compresso
mem.compressors[blobDigest] = compressorName
}
// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime {
// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in memory
// with corresponding compression info from mem.compressors, and returns the result of appending
// them to candidates. v2Output allows including candidates with unknown location, and filters out
// candidates with unknown compression.
func (mem *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) []prioritize.CandidateWithTime {
compressorName := blobinfocache.UnknownCompression
if v, ok := mem.compressors[digest]; ok {
compressorName = v
}
if compressorName == blobinfocache.UnknownCompression && v2Output {
return candidates
}
locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
for l, t := range locations {
compressorName, compressorKnown := mem.compressors[digest]
if !compressorKnown {
if requireCompressionInfo {
continue
}
compressorName = blobinfocache.UnknownCompression
if len(locations) > 0 {
for l, t := range locations {
candidates = append(candidates, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressorName,
Location: l,
},
LastSeen: t,
})
}
} else if v2Output {
candidates = append(candidates, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressorName,
Location: l,
Digest: digest,
CompressorName: compressorName,
UnknownLocation: true,
Location: types.BICLocationReference{Opaque: ""},
},
LastSeen: t,
LastSeen: time.Time{},
})
}
return candidates
@ -166,7 +181,7 @@ func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types
return blobinfocache.CandidateLocationsFromV2(mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
}
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// CandidateLocations2 returns a prioritized, limited number of blobs and their locations (if known) that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
@ -176,23 +191,24 @@ func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope type
return mem.candidateLocations(transport, scope, primaryDigest, canSubstitute, true)
}
func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
func (mem *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
mem.mutex.Lock()
defer mem.mutex.Unlock()
res := []prioritize.CandidateWithTime{}
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, requireCompressionInfo)
res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest, v2Output)
var uncompressedDigest digest.Digest // = ""
if canSubstitute {
if uncompressedDigest = mem.uncompressedDigestLocked(primaryDigest); uncompressedDigest != "" {
if otherDigests, ok := mem.digestsByUncompressed[uncompressedDigest]; ok {
otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
if otherDigests != nil {
for _, d := range otherDigests.Values() {
if d != primaryDigest && d != uncompressedDigest {
res = mem.appendReplacementCandidates(res, transport, scope, d, requireCompressionInfo)
res = mem.appendReplacementCandidates(res, transport, scope, d, v2Output)
}
}
}
if uncompressedDigest != primaryDigest {
res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, requireCompressionInfo)
res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest, v2Output)
}
}
}

View File

@ -57,7 +57,7 @@ type cache struct {
// The database/sql package says “It is rarely necessary to close a DB.”, and steers towards a long-term *sql.DB connection pool.
// That’s probably very applicable for database-backed services, where the database is the primary data store. That’s not necessarily
// the case for callers of c/image, where image operations might be a small proportion of hte total runtime, and the cache is fairly
// the case for callers of c/image, where image operations might be a small proportion of the total runtime, and the cache is fairly
// incidental even to the image operations. It’s also hard for us to use that model, because the public BlobInfoCache object doesn’t have
// a Close method, so creating a lot of single-use caches could leak data.
//
@ -117,7 +117,7 @@ func (sqc *cache) Open() {
if sqc.refCount == 0 {
db, err := rawOpen(sqc.path)
if err != nil {
logrus.Warnf("Error opening (previously-succesfully-opened) blob info cache at %q: %v", sqc.path, err)
logrus.Warnf("Error opening (previously-successfully-opened) blob info cache at %q: %v", sqc.path, err)
db = nil // But still increase sqc.refCount, because a .Close() will happen
}
sqc.db = db
@ -171,7 +171,7 @@ func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) {
// dbTransaction calls fn within a read-write transaction in db.
func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
// Ideally we should be able to distinguish between read-only and read-write transctions, see the _txlock=exclusive dicussion.
// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion.
var zeroRes T // A zero value of T
@ -249,7 +249,7 @@ func ensureDBHasCurrentSchema(db *sql.DB) error {
// * Joins (the two that exist in appendReplacementCandidates) are based on the text representation of digests.
//
// Using integer primary keys might make the joins themselves a bit more efficient, but then we would need to involve an extra
// join to translate from/to the user-provided digests anyway. If anything, that extra join (potentialy more btree lookups)
// join to translate from/to the user-provided digests anyway. If anything, that extra join (potentially more btree lookups)
// is probably costlier than comparing a few more bytes of data.
//
// Perhaps more importantly, storing digest texts directly makes the database dumps much easier to read for humans without
@ -427,11 +427,13 @@ func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressor
}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}
// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) ([]prioritize.CandidateWithTime, error) {
// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest),
// and returns the result of appending them to candidates. v2Output allows including candidates with unknown
// location, and filters out candidates with unknown compression.
func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, v2Output bool) ([]prioritize.CandidateWithTime, error) {
var rows *sql.Rows
var err error
if requireCompressionInfo {
if v2Output {
rows, err = tx.Query("SELECT location, time, compressor FROM KnownLocations JOIN DigestCompressors "+
"ON KnownLocations.digest = DigestCompressors.digest "+
"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
@ -448,6 +450,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
}
defer rows.Close()
res := []prioritize.CandidateWithTime{}
for rows.Next() {
var location string
var time time.Time
@ -455,7 +458,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
if err := rows.Scan(&location, &time, &compressorName); err != nil {
return nil, fmt.Errorf("scanning candidate: %w", err)
}
candidates = append(candidates, prioritize.CandidateWithTime{
res = append(res, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressorName,
@ -467,10 +470,29 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
if err := rows.Err(); err != nil {
return nil, fmt.Errorf("iterating through locations: %w", err)
}
if len(res) == 0 && v2Output {
compressor, found, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", digest.String())
if err != nil {
return nil, fmt.Errorf("scanning compressorName: %w", err)
}
if found {
res = append(res, prioritize.CandidateWithTime{
Candidate: blobinfocache.BICReplacementCandidate2{
Digest: digest,
CompressorName: compressor,
UnknownLocation: true,
Location: types.BICLocationReference{Opaque: ""},
},
LastSeen: time.Time{},
})
}
}
candidates = append(candidates, res...)
return candidates, nil
}
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations
// CandidateLocations2 returns a prioritized, limited number of blobs and their locations (if known)
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
//
@ -483,11 +505,11 @@ func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope type
return sqc.candidateLocations(transport, scope, digest, canSubstitute, true)
}
func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, v2Output bool) []blobinfocache.BICReplacementCandidate2 {
var uncompressedDigest digest.Digest // = ""
res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) {
res := []prioritize.CandidateWithTime{}
res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, requireCompressionInfo)
res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, v2Output)
if err != nil {
return nil, err
}
@ -516,7 +538,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
return nil, err
}
if otherDigest != primaryDigest && otherDigest != uncompressedDigest {
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, requireCompressionInfo)
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, v2Output)
if err != nil {
return nil, err
}
@ -527,7 +549,7 @@ func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types
}
if uncompressedDigest != primaryDigest {
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, requireCompressionInfo)
res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, v2Output)
if err != nil {
return nil, err
}

View File

@ -5,6 +5,7 @@ import (
"encoding/json"
"errors"
"fmt"
"io/fs"
"os"
"os/exec"
"path/filepath"
@ -61,78 +62,6 @@ func newAuthPathDefault(path string) authPath {
return authPath{path: path, legacyFormat: false}
}
// SetCredentials stores the username and password in a location
// appropriate for sys and the user’s configuration.
// A valid key is a repository, a namespace within a registry, or a registry hostname;
// using forms other than just a registry may fail depending on configuration.
// Returns a human-readable description of the location that was updated.
// NOTE: The return value is only intended to be read by humans; its form is not an API,
// it may change (or new forms can be added) any time.
func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) {
isNamespaced, err := validateKey(key)
if err != nil {
return "", err
}
helpers, err := sysregistriesv2.CredentialHelpers(sys)
if err != nil {
return "", err
}
// Make sure to collect all errors.
var multiErr error
for _, helper := range helpers {
var desc string
var err error
switch helper {
// Special-case the built-in helpers for auth files.
case sysregistriesv2.AuthenticationFileHelper:
desc, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
if ch, exists := fileContents.CredHelpers[key]; exists {
if isNamespaced {
return false, "", unsupportedNamespaceErr(ch)
}
desc, err := setCredsInCredHelper(ch, key, username, password)
if err != nil {
return false, "", err
}
return false, desc, nil
}
creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
newCreds := dockerAuthConfig{Auth: creds}
fileContents.AuthConfigs[key] = newCreds
return true, "", nil
})
// External helpers.
default:
if isNamespaced {
err = unsupportedNamespaceErr(helper)
} else {
desc, err = setCredsInCredHelper(helper, key, username, password)
}
}
if err != nil {
multiErr = multierror.Append(multiErr, err)
logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err)
continue
}
logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper)
return desc, nil
}
return "", multiErr
}
func unsupportedNamespaceErr(helper string) error {
return fmt.Errorf("namespaced key is not supported for credential helper %s", helper)
}
// SetAuthentication stores the username and password in the credential helper or file
// See the documentation of SetCredentials for format of "key"
func SetAuthentication(sys *types.SystemContext, key, username, password string) error {
_, err := SetCredentials(sys, key, username, password)
return err
}
// GetAllCredentials returns the registry credentials for all registries stored
// in any of the configured credential helpers.
func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthConfig, error) {
@ -370,17 +299,79 @@ func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string)
return creds.Username, creds.Password, nil
}
// SetCredentials stores the username and password in a location
// appropriate for sys and the user’s configuration.
// A valid key is a repository, a namespace within a registry, or a registry hostname;
// using forms other than just a registry may fail depending on configuration.
// Returns a human-readable description of the location that was updated.
// NOTE: The return value is only intended to be read by humans; its form is not an API,
// it may change (or new forms can be added) any time.
func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) {
helpers, jsonEditor, key, isNamespaced, err := prepareForEdit(sys, key, true)
if err != nil {
return "", err
}
// Make sure to collect all errors.
var multiErr error
for _, helper := range helpers {
var desc string
var err error
switch helper {
// Special-case the built-in helpers for auth files.
case sysregistriesv2.AuthenticationFileHelper:
desc, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
if ch, exists := fileContents.CredHelpers[key]; exists {
if isNamespaced {
return false, "", unsupportedNamespaceErr(ch)
}
desc, err := setCredsInCredHelper(ch, key, username, password)
if err != nil {
return false, "", err
}
return false, desc, nil
}
creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
newCreds := dockerAuthConfig{Auth: creds}
fileContents.AuthConfigs[key] = newCreds
return true, "", nil
})
// External helpers.
default:
if isNamespaced {
err = unsupportedNamespaceErr(helper)
} else {
desc, err = setCredsInCredHelper(helper, key, username, password)
}
}
if err != nil {
multiErr = multierror.Append(multiErr, err)
logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err)
continue
}
logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper)
return desc, nil
}
return "", multiErr
}
func unsupportedNamespaceErr(helper string) error {
return fmt.Errorf("namespaced key is not supported for credential helper %s", helper)
}
// SetAuthentication stores the username and password in the credential helper or file
// See the documentation of SetCredentials for format of "key"
func SetAuthentication(sys *types.SystemContext, key, username, password string) error {
_, err := SetCredentials(sys, key, username, password)
return err
}
// RemoveAuthentication removes credentials for `key` from all possible
// sources such as credential helpers and auth files.
// A valid key is a repository, a namespace within a registry, or a registry hostname;
// using forms other than just a registry may fail depending on configuration.
func RemoveAuthentication(sys *types.SystemContext, key string) error {
isNamespaced, err := validateKey(key)
if err != nil {
return err
}
helpers, err := sysregistriesv2.CredentialHelpers(sys)
helpers, jsonEditor, key, isNamespaced, err := prepareForEdit(sys, key, true)
if err != nil {
return err
}
@ -411,7 +402,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
switch helper {
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
_, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
_, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
if innerHelper, exists := fileContents.CredHelpers[key]; exists {
removeFromCredHelper(innerHelper)
}
@ -443,7 +434,7 @@ func RemoveAuthentication(sys *types.SystemContext, key string) error {
// RemoveAllAuthentication deletes all the credentials stored in credential
// helpers and auth files.
func RemoveAllAuthentication(sys *types.SystemContext) error {
helpers, err := sysregistriesv2.CredentialHelpers(sys)
helpers, jsonEditor, _, _, err := prepareForEdit(sys, "", false)
if err != nil {
return err
}
@ -454,7 +445,7 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
switch helper {
// Special-case the built-in helper for auth files.
case sysregistriesv2.AuthenticationFileHelper:
_, err = modifyJSON(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
_, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) {
for registry, helper := range fileContents.CredHelpers {
// Helpers in auth files are expected
// to exist, so no special treatment
@ -497,6 +488,46 @@ func RemoveAllAuthentication(sys *types.SystemContext) error {
return multiErr
}
// prepareForEdit processes sys and key (if keyRelevant) to return:
// - a list of credential helpers
// - a function which can be used to edit the JSON file
// - the key value to actually use in credential helpers / JSON
// - a boolean which is true if key is namespaced (and should not be used with credential helpers).
func prepareForEdit(sys *types.SystemContext, key string, keyRelevant bool) ([]string, func(*types.SystemContext, func(*dockerConfigFile) (bool, string, error)) (string, error), string, bool, error) {
var isNamespaced bool
if keyRelevant {
ns, err := validateKey(key)
if err != nil {
return nil, nil, "", false, err
}
isNamespaced = ns
}
if sys != nil && sys.DockerCompatAuthFilePath != "" {
if sys.AuthFilePath != "" {
return nil, nil, "", false, errors.New("AuthFilePath and DockerCompatAuthFilePath can not be set simultaneously")
}
if keyRelevant {
if isNamespaced {
return nil, nil, "", false, fmt.Errorf("Credentials cannot be recorded in Docker-compatible format with namespaced key %q", key)
}
if key == "docker.io" {
key = "https://index.docker.io/v1/"
}
}
// Do not use helpers defined in sysregistriesv2 because Docker isn't aware of them.
return []string{sysregistriesv2.AuthenticationFileHelper}, modifyDockerConfigJSON, key, false, nil
}
helpers, err := sysregistriesv2.CredentialHelpers(sys)
if err != nil {
return nil, nil, "", false, err
}
return helpers, modifyJSON, key, isNamespaced, nil
}
func listCredsInCredHelper(credHelper string) (map[string]string, error) {
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
p := helperclient.NewShellProgramFunc(helperName)
@ -513,9 +544,17 @@ func getPathToAuth(sys *types.SystemContext) (authPath, bool, error) {
// it exists only to allow testing it with an artificial runtime.GOOS.
func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool, error) {
if sys != nil {
if sys.AuthFilePath != "" && sys.DockerCompatAuthFilePath != "" {
return authPath{}, false, errors.New("AuthFilePath and DockerCompatAuthFilePath can not be set simultaneously")
}
if sys.AuthFilePath != "" {
return newAuthPathDefault(sys.AuthFilePath), true, nil
}
// When reading, we can process auth.json and Docker's config.json with the same code.
// When writing, prepareForEdit chooses an appropriate jsonEditor implementation.
if sys.DockerCompatAuthFilePath != "" {
return newAuthPathDefault(sys.DockerCompatAuthFilePath), true, nil
}
if sys.LegacyFormatAuthFilePath != "" {
return authPath{path: sys.LegacyFormatAuthFilePath, legacyFormat: true}, true, nil
}
@ -626,6 +665,86 @@ func modifyJSON(sys *types.SystemContext, editor func(fileContents *dockerConfig
return description, nil
}
// modifyDockerConfigJSON finds a docker config.json file, calls editor on the contents, and
// writes it back if editor returns true.
// Returns a human-readable description of the file, to be returned by SetCredentials.
//
// The editor may also return a human-readable description of the updated location; if it is "",
// the file itself is used.
func modifyDockerConfigJSON(sys *types.SystemContext, editor func(fileContents *dockerConfigFile) (bool, string, error)) (string, error) {
if sys == nil || sys.DockerCompatAuthFilePath == "" {
return "", errors.New("internal error: modifyDockerConfigJSON called with DockerCompatAuthFilePath not set")
}
path := sys.DockerCompatAuthFilePath
dir := filepath.Dir(path)
if err := os.MkdirAll(dir, 0700); err != nil {
return "", err
}
// Try hard not to clobber fields we don't understand, even fields which may be added in future Docker versions.
var rawContents map[string]json.RawMessage
originalBytes, err := os.ReadFile(path)
switch {
case err == nil:
if err := json.Unmarshal(originalBytes, &rawContents); err != nil {
return "", fmt.Errorf("unmarshaling JSON at %q: %w", path, err)
}
case errors.Is(err, fs.ErrNotExist):
rawContents = map[string]json.RawMessage{}
default: // err != nil
return "", err
}
syntheticContents := dockerConfigFile{
AuthConfigs: map[string]dockerAuthConfig{},
CredHelpers: map[string]string{},
}
// json.Unmarshal also falls back to case-insensitive field matching; this code does not do that. Presumably
// config.json is mostly maintained by machines doing `docker login`, so the files should, hopefully, not contain field names with
// unexpected case.
if rawAuths, ok := rawContents["auths"]; ok {
// This conversion will lose fields we don't know about; when updating an entry, we can't tell whether an unknown field
// should be preserved or discarded (because it is made obsolete/unwanted with the new credentials).
// It might make sense to track which entries of "auths" we actually modified, and to not touch any others.
if err := json.Unmarshal(rawAuths, &syntheticContents.AuthConfigs); err != nil {
return "", fmt.Errorf(`unmarshaling "auths" in JSON at %q: %w`, path, err)
}
}
if rawCH, ok := rawContents["credHelpers"]; ok {
if err := json.Unmarshal(rawCH, &syntheticContents.CredHelpers); err != nil {
return "", fmt.Errorf(`unmarshaling "credHelpers" in JSON at %q: %w`, path, err)
}
}
updated, description, err := editor(&syntheticContents)
if err != nil {
return "", fmt.Errorf("updating %q: %w", path, err)
}
if updated {
rawAuths, err := json.MarshalIndent(syntheticContents.AuthConfigs, "", "\t")
if err != nil {
return "", fmt.Errorf("marshaling JSON %q: %w", path, err)
}
rawContents["auths"] = rawAuths
// We never modify syntheticContents.CredHelpers, so we don't need to update it.
newData, err := json.MarshalIndent(rawContents, "", "\t")
if err != nil {
return "", fmt.Errorf("marshaling JSON %q: %w", path, err)
}
if err = ioutils.AtomicWriteFile(path, newData, 0600); err != nil {
return "", fmt.Errorf("writing to file %q: %w", path, err)
}
}
if description == "" {
description = path
}
return description, nil
}
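The key move in modifyDockerConfigJSON is round-tripping the file through map[string]json.RawMessage, so top-level fields this code does not model survive a rewrite. A standalone sketch of that preservation technique, under the assumption of a config.json carrying extra fields (this is not the function above, just the idea):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	original := []byte(`{"auths":{},"credsStore":"desktop","proxies":{"default":{}}}`)

	// Decode only the top level; unknown fields stay as opaque raw bytes.
	var raw map[string]json.RawMessage
	if err := json.Unmarshal(original, &raw); err != nil {
		panic(err)
	}

	// Edit just the field we understand...
	auths := map[string]map[string]string{
		"registry.example.com": {"auth": "dXNlcjpzZWNyZXQ="},
	}
	rawAuths, err := json.Marshal(auths)
	if err != nil {
		panic(err)
	}
	raw["auths"] = rawAuths

	// ...and re-serialize; "credsStore" and "proxies" pass through untouched.
	updated, err := json.MarshalIndent(raw, "", "\t")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(updated))
}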
func getCredsFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) {
helperName := fmt.Sprintf("docker-credential-%s", credHelper)
p := helperclient.NewShellProgramFunc(helperName)

View File

@ -66,7 +66,7 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
if err != nil {
return err
}
tlsc.Certificates = append(tlsc.Certificates, cert)
tlsc.Certificates = append(slices.Clone(tlsc.Certificates), cert)
}
if strings.HasSuffix(f.Name(), ".key") {
keyName := f.Name()
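The slices.Clone guards against appending into a Certificates slice whose backing array is shared with another tls.Config. A small illustration of the aliasing hazard the copy avoids (synthetic ints instead of certificates):

package main

import (
	"fmt"
	"slices"
)

func main() {
	shared := make([]int, 2, 4) // spare capacity makes the aliasing visible
	a := shared
	b := shared

	a = append(a, 1) // writes into the shared backing array
	b = append(b, 2) // clobbers the element a just appended
	fmt.Println(a[2]) // 2, not 1: a and b alias the same array

	c := append(slices.Clone(shared), 3) // clone first, then append
	fmt.Println(c[2])                    // 3, unaffected by other appends
}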

View File

@ -10,6 +10,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
"github.com/containers/storage"
digest "github.com/opencontainers/go-digest"
@ -101,6 +102,8 @@ func multiArchImageMatchesSystemContext(store storage.Store, img *storage.Image,
// Resolve the reference's name to an image ID in the store, if there's already
// one present with the same name or ID, and return the image.
//
// Returns an error matching ErrNoSuchImage if an image matching ref was not found.
func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Image, error) {
var loadedImage *storage.Image
if s.id == "" && s.named != nil {
@ -283,3 +286,31 @@ func (s storageReference) NewImageSource(ctx context.Context, sys *types.SystemC
func (s storageReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) {
return newImageDestination(sys, s)
}
// ResolveReference finds the underlying storage image for a storage.Transport reference.
// It returns that image, and an updated reference which can be used to refer back to the _same_
// image again.
//
// This matters if the input reference contains a tagged name; the destination of the tag can
// move in local storage. The updated reference returned by this function contains the resolved
// image ID, so later uses of that updated reference will either continue to refer to the same
// image, or fail.
//
// Note that it _is_ possible for the later uses to fail, either because the image was removed
// completely, or because the name used in the reference was untagged (even if the underlying image
// ID still exists in local storage).
//
// Returns an error matching ErrNoSuchImage if an image matching ref was not found.
func ResolveReference(ref types.ImageReference) (types.ImageReference, *storage.Image, error) {
sref, ok := ref.(*storageReference)
if !ok {
return nil, nil, fmt.Errorf("trying to resolve a non-%s: reference %q", Transport.Name(),
transports.ImageName(ref))
}
clone := *sref // A shallow copy we can update
img, err := clone.resolveImage(nil)
if err != nil {
return nil, nil, err
}
return clone, img, nil
}
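A hedged usage sketch: parse a containers-storage reference, pin it to a concrete image ID with ResolveReference, and keep using the returned reference so a concurrent retag cannot redirect later operations (the image name is illustrative):

package main

import (
	"fmt"
	"log"

	istorage "github.com/containers/image/v5/storage"
	"github.com/containers/image/v5/transports"
)

func main() {
	ref, err := istorage.Transport.ParseReference("docker.io/library/busybox:latest")
	if err != nil {
		log.Fatal(err)
	}
	// Resolve the tag to the underlying image; resolvedRef now embeds the
	// image ID, so it keeps referring to this exact image (or fails cleanly).
	resolvedRef, img, err := istorage.ResolveReference(ref)
	if err != nil {
		log.Fatal(err) // matches ErrNoSuchImage if nothing was found
	}
	fmt.Println("pinned", transports.ImageName(resolvedRef), "to image ID", img.ID)
}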

View File

@ -48,9 +48,26 @@ type StoreTransport interface {
GetStoreIfSet() storage.Store
// GetImage retrieves the image from the transport's store that's named
// by the reference.
// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
// can return different images, with no way for the caller to "freeze" the storage.Image identity
// without discarding the name entirely.
//
// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
GetImage(types.ImageReference) (*storage.Image, error)
// GetStoreImage retrieves the image from a specified store that's named
// by the reference.
//
// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
// can return different images, with no way for the caller to "freeze" the storage.Image identity
// without discarding the name entirely.
//
// Also, a StoreTransport reference already contains a store, so providing another one is redundant.
//
// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
GetStoreImage(storage.Store, types.ImageReference) (*storage.Image, error)
// ParseStoreReference parses a reference, overriding any store
// specification that it may contain.
@ -290,6 +307,15 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc
return s.ParseStoreReference(store, reference)
}
// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
// can return different images, with no way for the caller to "freeze" the storage.Image identity
// without discarding the name entirely.
//
// Also, a StoreTransport reference already contains a store, so providing another one is redundant.
//
// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) {
dref := ref.DockerReference()
if dref != nil {
@ -306,6 +332,13 @@ func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageRefe
return nil, storage.ErrImageUnknown
}
// Deprecated: Surprisingly, with a StoreTransport reference which contains an ID,
// this ignores that ID; and repeated calls of GetStoreImage with the same named reference
// can return different images, with no way for the caller to "freeze" the storage.Image identity
// without discarding the name entirely.
//
// Use storage.ResolveReference instead; note that if the image is not found, ResolveReference returns
// c/image/v5/storage.ErrNoSuchImage, not c/storage.ErrImageUnknown.
func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) {
store, err := s.GetStore()
if err != nil {

View File

@ -445,7 +445,7 @@ type ImageCloser interface {
Close() error
}
// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest
// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage
type ManifestUpdateOptions struct {
LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored.
EmbeddedDockerReference reference.Named
@ -457,7 +457,7 @@ type ManifestUpdateOptions struct {
// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here
// only to make writing struct literals possible.
type ManifestUpdateInformation struct {
Destination ImageDestination // and yes, UpdatedManifest may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
Destination ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers)
LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order.
}
@ -594,6 +594,10 @@ type SystemContext struct {
// this field is ignored if `AuthFilePath` is set (we favor the newer format);
// only reading of this data is supported;
LegacyFormatAuthFilePath string
// If set, a path to a Docker-compatible "config.json" file containing credentials; no other files are processed.
// This must not be set if AuthFilePath is set.
// Only credentials and credential helpers in this file are processed, not any other configuration it may contain.
DockerCompatAuthFilePath string
// If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
ArchitectureChoice string
// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
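A minimal sketch of the new field, assuming a caller that wants its writes to land in a file the Docker CLI can read (the path is illustrative):

package main

import (
	"log"

	"github.com/containers/image/v5/pkg/docker/config"
	"github.com/containers/image/v5/types"
)

func main() {
	sys := &types.SystemContext{
		// Write credentials in Docker's config.json format. AuthFilePath
		// must stay empty; the two fields are mutually exclusive.
		DockerCompatAuthFilePath: "/tmp/docker-compat/config.json",
	}
	// In this mode "docker.io" is normalized to the legacy
	// "https://index.docker.io/v1/" key that Docker expects.
	if _, err := config.SetCredentials(sys, "docker.io", "user", "secret"); err != nil {
		log.Fatal(err)
	}
}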

View File

@ -8,10 +8,10 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 28
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
VersionPatch = 1
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
VersionDev = "-dev"
)
// Version is the specification version that the package types support.

View File

@ -8,7 +8,7 @@ import (
"crypto/rsa"
"errors"
"fmt"
"io/ioutil"
"io"
"net/http"
"sync"
"time"
@ -159,7 +159,7 @@ func (r *RemoteKeySet) verify(ctx context.Context, jws *jose.JSONWebSignature) (
// https://openid.net/specs/openid-connect-core-1_0.html#RotateSigKeys
keys, err := r.keysFromRemote(ctx)
if err != nil {
return nil, fmt.Errorf("fetching keys %v", err)
return nil, fmt.Errorf("fetching keys %w", err)
}
for _, key := range keys {
@ -228,11 +228,11 @@ func (r *RemoteKeySet) updateKeys() ([]jose.JSONWebKey, error) {
resp, err := doRequest(r.ctx, req)
if err != nil {
return nil, fmt.Errorf("oidc: get keys failed %v", err)
return nil, fmt.Errorf("oidc: get keys failed %w", err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("unable to read response body: %v", err)
}

View File

@ -10,7 +10,7 @@ import (
"errors"
"fmt"
"hash"
"io/ioutil"
"io"
"mime"
"net/http"
"strings"
@ -211,7 +211,7 @@ func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("unable to read response body: %v", err)
}
@ -332,7 +332,7 @@ func (p *Provider) UserInfo(ctx context.Context, tokenSource oauth2.TokenSource)
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, err
}

View File

@ -7,7 +7,7 @@ import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"io"
"net/http"
"strings"
"time"
@ -182,7 +182,7 @@ func resolveDistributedClaim(ctx context.Context, verifier *IDTokenVerifier, src
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("unable to read response body: %v", err)
}

8
vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md generated vendored Normal file
View File

@ -0,0 +1,8 @@
# v3.0.1
Fixed:
- Security issue: an attacker specifying a large "p2c" value can cause
JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large
amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the
disclosure and to Tom Tervoort for originally publishing the category of attack.
https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf

View File

@ -415,6 +415,11 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien
if p2c <= 0 {
return nil, fmt.Errorf("go-jose/go-jose: invalid P2C: must be a positive integer")
}
if p2c > 1000000 {
// An unauthenticated attacker can set a high P2C value. Set an upper limit to avoid
// DoS attacks.
return nil, fmt.Errorf("go-jose/go-jose: invalid P2C: too high")
}
// salt is UTF8(Alg) || 0x00 || Salt Input
alg := headers.getAlgorithm()
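Callers see no API change; a hostile token is simply rejected fast. A hedged sketch of decrypting a PBES2-protected compact JWE with v3.0.1 (the token and password are placeholders):

package main

import (
	"fmt"
	"log"

	jose "github.com/go-jose/go-jose/v3"
)

func decrypt(compact string, password []byte) ([]byte, error) {
	obj, err := jose.ParseEncrypted(compact)
	if err != nil {
		return nil, err
	}
	// With the cap above, an attacker-controlled "p2c" beyond 1,000,000
	// fails here with an error instead of forcing enormous PBKDF2 work.
	return obj.Decrypt(password)
}

func main() {
	plaintext, err := decrypt("<compact JWE here>", []byte("correct horse"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(plaintext))
}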

View File

@ -1,6 +1,7 @@
# A minimal logging API for Go
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/go-logr/logr/badge)](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
logr offers an(other) opinion on how Go programs and libraries can do logging
without becoming coupled to a particular logging implementation. This is not
@ -73,6 +74,29 @@ received:
If the Go standard library had defined an interface for logging, this project
probably would not be needed. Alas, here we are.
When the Go developers started developing such an interface with
[slog](https://github.com/golang/go/issues/56345), they adopted some of the
logr design but also left out some parts and changed others:
| Feature | logr | slog |
|---------|------|------|
| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) |
| Low-level API | `LogSink` | `Handler` |
| Stack unwinding | done by `LogSink` | done by `Logger` |
| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) |
| Generating a value for logging on demand | `Marshaler` | `LogValuer` |
| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" |
| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` |
| Passing logger via context | `NewContext`, `FromContext` | no API |
| Adding a name to a logger | `WithName` | no API |
| Modify verbosity of log entries in a call chain | `V` | no API |
| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
The high-level slog API is explicitly meant to be one of many different APIs
that can be layered on top of a shared `slog.Handler`. logr is one such
alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr)
package.
### Inspiration
Before you consider this package, please read [this blog post by the
@ -118,6 +142,91 @@ There are implementations for the following logging libraries:
- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
## slog interoperability
Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and
`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`.
As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
slog API. `slogr` itself leaves that to the caller.
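For example (a sketch against logr v1.3.0 and Go 1.21):

    package main

    import (
        "log/slog"
        "os"

        "github.com/go-logr/logr/slogr"
    )

    func main() {
        handler := slog.NewTextHandler(os.Stderr, nil)
        logger := slogr.NewLogr(handler) // the logr API on top of a slog.Handler
        logger.Info("hello", "answer", 42)

        slogger := slog.New(slogr.NewSlogHandler(logger)) // and back again
        slogger.Info("world")
    }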
## Using a `logr.Sink` as backend for slog
Ideally, a logr sink implementation should support both logr and slog by
implementing both the normal logr interface(s) and `slogr.SlogSink`. Because
of a conflict in the parameters of the common `Enabled` method, it is [not
possible to implement both slog.Handler and logr.Sink in the same
type](https://github.com/golang/go/issues/59110).
If both are supported, log calls can go from the high-level APIs to the backend
without the need to convert parameters. `NewLogr` and `NewSlogHandler` can
convert back and forth without adding additional wrappers, with one exception:
when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future
log calls.
Such an implementation should also support values that implement specific
interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`,
`slog.GroupValue`). logr does not convert those.
Not supporting slog has several drawbacks:
- Recording source code locations works correctly if the handler gets called
through `slog.Logger`, but may be wrong in other cases. That's because a
`logr.Sink` does its own stack unwinding instead of using the program counter
provided by the high-level API.
- slog levels <= 0 can be mapped to logr levels by negating the level without a
loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as
used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink
because logr does not support "more important than info" levels.
- The slog group concept is supported by prefixing each key in a key/value
pair with the group names, separated by a dot. For structured output like
JSON it would be better to group the key/value pairs inside an object.
- Special slog values and interfaces don't work as expected.
- The overhead is likely to be higher.
These drawbacks are severe enough that applications using a mixture of slog and
logr should switch to a different backend.
## Using a `slog.Handler` as backend for logr
Using a plain `slog.Handler` without support for logr works better than the
other direction:
- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
by negating them.
- Stack unwinding is done by the `slogr.SlogSink` and the resulting program
counter is passed to the `slog.Handler`.
- Names added via `Logger.WithName` are gathered and recorded in an additional
attribute with `logger` as key and the names separated by slash as value.
- `Logger.Error` is turned into a log record with `slog.LevelError` as level
and an additional attribute with `err` as key, if an error was provided.
The main drawback is that `logr.Marshaler` will not be supported. Types should
ideally support both `logr.Marshaler` and `slog.LogValuer`. If compatibility
with logr implementations without slog support is not important, then
`slog.LogValuer` is sufficient.
## Context support for slog
Storing a logger in a `context.Context` is not supported by
slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this
to fill this gap:
func HandlerFromContext(ctx context.Context) slog.Handler {
logger, err := logr.FromContext(ctx)
if err == nil {
return slogr.NewSlogHandler(logger)
}
return slog.Default().Handler()
}
func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context {
return logr.NewContext(ctx, slogr.NewLogr(handler))
}
The downside is that storing and retrieving a `slog.Handler` needs more
allocations compared to using a `logr.Logger`. Therefore the recommendation is
to use the `logr.Logger` API in code which uses contextual logging.
## FAQ
### Conceptual
@ -241,7 +350,9 @@ Otherwise, you can start out with `0` as "you always want to see this",
Then gradually choose levels in between as you need them, working your way
down from 10 (for debug and trace style logs) and up from 1 (for chattier
info-type logs.)
info-type logs). For reference, slog pre-defines -4 for debug logs
(corresponds to 4 in logr), which matches what is
[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
#### How do I choose my keys?

18
vendor/github.com/go-logr/logr/SECURITY.md generated vendored Normal file
View File

@ -0,0 +1,18 @@
# Security Policy
If you have discovered a security vulnerability in this project, please report it
privately. **Do not disclose it as a public issue.** This gives us time to work with you
to fix the issue before public exposure, reducing the chance that the exploit will be
used before a patch is released.
You may submit the report in the following ways:
- send an email to go-logr-security@googlegroups.com
- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new)
Please provide the following information in your report:
- A description of the vulnerability and its impact
- How to reproduce the issue
We ask that you give us 90 days to work on a fix before public exposure.

View File

@ -116,17 +116,17 @@ type Options struct {
// Equivalent hooks are offered for key-value pairs saved via
// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
// for user-provided pairs (see RenderArgsHook).
RenderBuiltinsHook func(kvList []interface{}) []interface{}
RenderBuiltinsHook func(kvList []any) []any
// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
// only called for key-value pairs saved via logr.Logger.WithValues. See
// RenderBuiltinsHook for more details.
RenderValuesHook func(kvList []interface{}) []interface{}
RenderValuesHook func(kvList []any) []any
// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
// called for key-value pairs passed directly to Info and Error. See
// RenderBuiltinsHook for more details.
RenderArgsHook func(kvList []interface{}) []interface{}
RenderArgsHook func(kvList []any) []any
// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
// that contains a struct, etc.) it may log. Every time it finds a struct,
@ -163,7 +163,7 @@ func (l fnlogger) WithName(name string) logr.LogSink {
return &l
}
func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink {
func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
l.Formatter.AddValues(kvList)
return &l
}
@ -173,12 +173,12 @@ func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
return &l
}
func (l fnlogger) Info(level int, msg string, kvList ...interface{}) {
func (l fnlogger) Info(level int, msg string, kvList ...any) {
prefix, args := l.FormatInfo(level, msg, kvList)
l.write(prefix, args)
}
func (l fnlogger) Error(err error, msg string, kvList ...interface{}) {
func (l fnlogger) Error(err error, msg string, kvList ...any) {
prefix, args := l.FormatError(err, msg, kvList)
l.write(prefix, args)
}
@ -229,7 +229,7 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
type Formatter struct {
outputFormat outputFormat
prefix string
values []interface{}
values []any
valuesStr string
depth int
opts *Options
@ -246,10 +246,10 @@ const (
)
// PseudoStruct is a list of key-value pairs that gets logged as a struct.
type PseudoStruct []interface{}
type PseudoStruct []any
// render produces a log line, ready to use.
func (f Formatter) render(builtins, args []interface{}) string {
func (f Formatter) render(builtins, args []any) string {
// Empirically bytes.Buffer is faster than strings.Builder for this.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
if f.outputFormat == outputJSON {
@ -292,7 +292,7 @@ func (f Formatter) render(builtins, args []interface{}) string {
// This function returns a potentially modified version of kvList, which
// ensures that there is a value for every key (adding a value if needed) and
// that each key is a string (substituting a key if needed).
func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any {
// This logic overlaps with sanitize() but saves one type-cast per key,
// which can be measurable.
if len(kvList)%2 != 0 {
@ -334,7 +334,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing b
return kvList
}
func (f Formatter) pretty(value interface{}) string {
func (f Formatter) pretty(value any) string {
return f.prettyWithFlags(value, 0, 0)
}
@ -343,7 +343,7 @@ const (
)
// TODO: This is not fast. Most of the overhead goes here.
func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
if depth > f.opts.MaxLogDepth {
return `"<max-log-depth-exceeded>"`
}
@ -614,7 +614,7 @@ func isEmpty(v reflect.Value) bool {
return false
}
func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
func invokeMarshaler(m logr.Marshaler) (ret any) {
defer func() {
if r := recover(); r != nil {
ret = fmt.Sprintf("<panic: %s>", r)
@ -675,12 +675,12 @@ func (f Formatter) caller() Caller {
const noValue = "<no-value>"
func (f Formatter) nonStringKey(v interface{}) string {
func (f Formatter) nonStringKey(v any) string {
return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
}
// snippet produces a short snippet string of an arbitrary value.
func (f Formatter) snippet(v interface{}) string {
func (f Formatter) snippet(v any) string {
const snipLen = 16
snip := f.pretty(v)
@ -693,7 +693,7 @@ func (f Formatter) snippet(v interface{}) string {
// sanitize ensures that a list of key-value pairs has a value for every key
// (adding a value if needed) and that each key is a string (substituting a key
// if needed).
func (f Formatter) sanitize(kvList []interface{}) []interface{} {
func (f Formatter) sanitize(kvList []any) []any {
if len(kvList)%2 != 0 {
kvList = append(kvList, noValue)
}
@ -727,8 +727,8 @@ func (f Formatter) GetDepth() int {
// FormatInfo renders an Info log message into strings. The prefix will be
// empty when no names were set (via AddNames), or when the output is
// configured for JSON.
func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) {
args := make([]interface{}, 0, 64) // using a constant here impacts perf
func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
args := make([]any, 0, 64) // using a constant here impacts perf
prefix = f.prefix
if f.outputFormat == outputJSON {
args = append(args, "logger", prefix)
@ -745,10 +745,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (pref
}
// FormatError renders an Error log message into strings. The prefix will be
// empty when no names were set (via AddNames), or when the output is
// empty when no names were set (via AddNames), or when the output is
// configured for JSON.
func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) {
args := make([]interface{}, 0, 64) // using a constant here impacts perf
func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
args := make([]any, 0, 64) // using a constant here impacts perf
prefix = f.prefix
if f.outputFormat == outputJSON {
args = append(args, "logger", prefix)
@ -761,12 +761,12 @@ func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (pre
args = append(args, "caller", f.caller())
}
args = append(args, "msg", msg)
var loggableErr interface{}
var loggableErr any
if err != nil {
loggableErr = err.Error()
}
args = append(args, "error", loggableErr)
return f.prefix, f.render(args, kvList)
return prefix, f.render(args, kvList)
}
// AddName appends the specified name. funcr uses '/' characters to separate
@ -781,7 +781,7 @@ func (f *Formatter) AddName(name string) {
// AddValues adds key-value pairs to the set of saved values to be logged with
// each log line.
func (f *Formatter) AddValues(kvList []interface{}) {
func (f *Formatter) AddValues(kvList []any) {
// Three slice args forces a copy.
n := len(f.values)
f.values = append(f.values[:n:n], kvList...)

View File

@ -127,9 +127,9 @@ limitations under the License.
// such a value can call its methods without having to check whether the
// instance is ready for use.
//
// Calling methods with the null logger (Logger{}) as instance will crash
// because it has no LogSink. Therefore this null logger should never be passed
// around. For cases where passing a logger is optional, a pointer to Logger
// The zero logger (= Logger{}) is identical to Discard() and discards all log
// entries. Code that receives a Logger by value can simply call it; the methods
// will never crash. For cases where passing a logger is optional, a pointer to Logger
// should be used.
//
// # Key Naming Conventions
@ -258,6 +258,12 @@ type Logger struct {
// Enabled tests whether this Logger is enabled. For example, commandline
// flags might be used to set the logging verbosity and disable some info logs.
func (l Logger) Enabled() bool {
// Some implementations of LogSink look at the caller in Enabled (e.g.
// different verbosity levels per package or file), but we only pass one
// CallDepth in (via Init). This means that all calls from Logger to the
// LogSink's Enabled, Info, and Error methods must have the same number of
// frames. In other words, Logger methods can't call other Logger methods
// which call these LogSink methods unless we do it the same in all paths.
return l.sink != nil && l.sink.Enabled(l.level)
}
@ -267,11 +273,11 @@ func (l Logger) Enabled() bool {
// line. The key/value pairs can then be used to add additional variable
// information. The key/value pairs must alternate string keys and arbitrary
// values.
func (l Logger) Info(msg string, keysAndValues ...interface{}) {
func (l Logger) Info(msg string, keysAndValues ...any) {
if l.sink == nil {
return
}
if l.Enabled() {
if l.sink.Enabled(l.level) { // see comment in Enabled
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
withHelper.GetCallStackHelper()()
}
@ -289,7 +295,7 @@ func (l Logger) Info(msg string, keysAndValues ...interface{}) {
// while the err argument should be used to attach the actual error that
// triggered this log line, if present. The err parameter is optional
// and nil may be passed instead of an error instance.
func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
func (l Logger) Error(err error, msg string, keysAndValues ...any) {
if l.sink == nil {
return
}
@ -314,9 +320,16 @@ func (l Logger) V(level int) Logger {
return l
}
// GetV returns the verbosity level of the logger. If the logger's LogSink is
// nil as in the Discard logger, this will always return 0.
func (l Logger) GetV() int {
// 0 if l.sink nil because of the if check in V above.
return l.level
}
// WithValues returns a new Logger instance with additional key/value pairs.
// See Info for documentation on how key/value pairs work.
func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
func (l Logger) WithValues(keysAndValues ...any) Logger {
if l.sink == nil {
return l
}
@ -467,15 +480,15 @@ type LogSink interface {
// The level argument is provided for optional logging. This method will
// only be called when Enabled(level) is true. See Logger.Info for more
// details.
Info(level int, msg string, keysAndValues ...interface{})
Info(level int, msg string, keysAndValues ...any)
// Error logs an error, with the given message and key/value pairs as
// context. See Logger.Error for more details.
Error(err error, msg string, keysAndValues ...interface{})
Error(err error, msg string, keysAndValues ...any)
// WithValues returns a new LogSink with additional key/value pairs. See
// Logger.WithValues for more details.
WithValues(keysAndValues ...interface{}) LogSink
WithValues(keysAndValues ...any) LogSink
// WithName returns a new LogSink with the specified name appended. See
// Logger.WithName for more details.
@ -546,5 +559,5 @@ type Marshaler interface {
// with exported fields
//
// It may return any value of any type.
MarshalLog() interface{}
MarshalLog() any
}

View File

@ -1,3 +1,9 @@
## 0.7.5 (Nov 8, 2023)
BUG FIXES
- client: fixes an issue where the request body is not preserved on temporary redirects or re-established HTTP/2 connections [GH-207]
## 0.7.4 (Jun 6, 2023)
BUG FIXES

View File

@ -160,6 +160,20 @@ func (r *Request) SetBody(rawBody interface{}) error {
}
r.body = bodyReader
r.ContentLength = contentLength
if bodyReader != nil {
r.GetBody = func() (io.ReadCloser, error) {
body, err := bodyReader()
if err != nil {
return nil, err
}
if rc, ok := body.(io.ReadCloser); ok {
return rc, nil
}
return io.NopCloser(body), nil
}
} else {
r.GetBody = func() (io.ReadCloser, error) { return http.NoBody, nil }
}
return nil
}
@ -302,18 +316,19 @@ func NewRequest(method, url string, rawBody interface{}) (*Request, error) {
// The context controls the entire lifetime of a request and its response:
// obtaining a connection, sending the request, and reading the response headers and body.
func NewRequestWithContext(ctx context.Context, method, url string, rawBody interface{}) (*Request, error) {
bodyReader, contentLength, err := getBodyReaderAndContentLength(rawBody)
if err != nil {
return nil, err
}
httpReq, err := http.NewRequestWithContext(ctx, method, url, nil)
if err != nil {
return nil, err
}
httpReq.ContentLength = contentLength
return &Request{body: bodyReader, Request: httpReq}, nil
req := &Request{
Request: httpReq,
}
if err := req.SetBody(rawBody); err != nil {
return nil, err
}
return req, nil
}
// Logger interface allows to use other loggers than
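The practical effect: a request created through NewRequest or NewRequestWithContext can now be replayed across temporary redirects and re-established HTTP/2 connections, because net/http re-obtains the body via GetBody. A hedged usage sketch (the URL is a placeholder):

package main

import (
	"log"
	"strings"

	retryablehttp "github.com/hashicorp/go-retryablehttp"
)

func main() {
	client := retryablehttp.NewClient()
	req, err := retryablehttp.NewRequest("POST", "https://example.com/upload", strings.NewReader("payload"))
	if err != nil {
		log.Fatal(err)
	}
	// The body survives retries and redirects; GetBody hands net/http a
	// fresh reader each time the request has to be re-sent.
	resp, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
}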

View File

@ -16,6 +16,14 @@ This package provides various compression algorithms.
# changelog
* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2)
* zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)
* s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871
* flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869
* s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867
* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
* Add experimental dictionary builder https://github.com/klauspost/compress/pull/853
* Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838

View File

@ -212,7 +212,7 @@ func (s *Scratch) writeCount() error {
previous0 bool
charnum uint16
maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3
// Write Table Size
bitStream = uint32(tableLog - minTablelog)

View File

@ -43,7 +43,7 @@ func (m *match) estBits(bitsPerByte int32) {
if m.rep < 0 {
ofc = ofCode(uint32(m.s-m.offset) + 3)
} else {
ofc = ofCode(uint32(m.rep))
ofc = ofCode(uint32(m.rep) & 3)
}
// Cost, excluding
ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
@ -227,7 +227,7 @@ encodeLoop:
}
}
l := 4 + e.matchlen(s+4, offset+4, src)
if rep < 0 {
if true {
// Extend candidate match backwards as far as possible.
tMin := s - e.maxMatchOff
if tMin < 0 {
@ -282,6 +282,7 @@ encodeLoop:
// Load next and check...
e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset}
e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset}
index0 := s + 1
// Look far ahead, unless we have a really long match already...
if best.length < goodEnough {
@ -357,19 +358,16 @@ encodeLoop:
blk.sequences = append(blk.sequences, seq)
// Index old s + 1 -> s - 1
index0 := s + 1
s = best.s + best.length
nextEmit = s
if s >= sLimit {
if debugEncoder {
println("repeat ended", s, best.length)
}
break encodeLoop
}
// Index skipped...
end := s
if s > sLimit+4 {
end = sLimit + 4
}
off := index0 + e.cur
for index0 < s {
for index0 < end {
cv0 := load6432(src, index0)
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@ -378,6 +376,7 @@ encodeLoop:
off++
index0++
}
switch best.rep {
case 2, 4 | 1:
offset1, offset2 = offset2, offset1
@ -386,12 +385,17 @@ encodeLoop:
case 4 | 3:
offset1, offset2, offset3 = offset1-1, offset1, offset2
}
if s >= sLimit {
if debugEncoder {
println("repeat ended", s, best.length)
}
break encodeLoop
}
continue
}
// A 4-byte match has been found. Update recent offsets.
// We'll later see if more than 4 bytes.
index0 := s + 1
s = best.s
t := best.offset
offset1, offset2, offset3 = s-t, offset1, offset2
@ -419,19 +423,25 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
nextEmit = s
if s >= sLimit {
break encodeLoop
// Index old s + 1 -> s - 1 or sLimit
end := s
if s > sLimit-4 {
end = sLimit - 4
}
// Index old s + 1 -> s - 1
for index0 < s {
off := index0 + e.cur
for index0 < end {
cv0 := load6432(src, index0)
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
index0++
off++
}
if s >= sLimit {
break encodeLoop
}
}

View File

@ -145,7 +145,7 @@ encodeLoop:
var t int32
// We allow the encoder to optionally turn off repeat offsets across blocks
canRepeat := len(blk.sequences) > 2
var matched int32
var matched, index0 int32
for {
if debugAsserts && canRepeat && offset1 == 0 {
@ -162,6 +162,7 @@ encodeLoop:
off := s + e.cur
e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
index0 = s + 1
if canRepeat {
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
@ -258,7 +259,6 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
index0 := s + repOff2
s += lenght + repOff2
nextEmit = s
if s >= sLimit {
@ -498,15 +498,15 @@ encodeLoop:
}
// Index match start+1 (long) -> s - 1
index0 := s - l + 1
off := index0 + e.cur
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
index0 += 2
off += 2
}
cv = load6432(src, s)
@ -672,7 +672,7 @@ encodeLoop:
var t int32
// We allow the encoder to optionally turn off repeat offsets across blocks
canRepeat := len(blk.sequences) > 2
var matched int32
var matched, index0 int32
for {
if debugAsserts && canRepeat && offset1 == 0 {
@ -691,6 +691,7 @@ encodeLoop:
e.markLongShardDirty(nextHashL)
e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
e.markShortShardDirty(nextHashS)
index0 = s + 1
if canRepeat {
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
@ -726,7 +727,6 @@ encodeLoop:
blk.sequences = append(blk.sequences, seq)
// Index match start+1 (long) -> s - 1
index0 := s + repOff
s += lenght + repOff
nextEmit = s
@ -790,7 +790,6 @@ encodeLoop:
}
blk.sequences = append(blk.sequences, seq)
index0 := s + repOff2
s += lenght + repOff2
nextEmit = s
if s >= sLimit {
@ -1024,18 +1023,18 @@ encodeLoop:
}
// Index match start+1 (long) -> s - 1
index0 := s - l + 1
off := index0 + e.cur
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.markLongShardDirty(h0)
h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.markShortShardDirty(h1)
index0 += 2
off += 2
}
cv = load6432(src, s)

File diff suppressed because it is too large

View File

@ -147,9 +147,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
#define SQLITE_VERSION "3.42.0"
#define SQLITE_VERSION_NUMBER 3042000
#define SQLITE_SOURCE_ID "2023-05-16 12:36:15 831d0fb2836b71c9bc51067c49fee4b8f18047814f2ff22d817d25195cf350b0"
#define SQLITE_VERSION "3.44.0"
#define SQLITE_VERSION_NUMBER 3044000
#define SQLITE_SOURCE_ID "2023-11-01 11:23:50 17129ba1ff7f0daf37100ee82d507aef7827cf38de1866e2633096ae6ad81301"
/*
** CAPI3REF: Run-Time Library Version Numbers
@ -529,6 +529,7 @@ SQLITE_API int sqlite3_exec(
#define SQLITE_IOERR_ROLLBACK_ATOMIC (SQLITE_IOERR | (31<<8))
#define SQLITE_IOERR_DATA (SQLITE_IOERR | (32<<8))
#define SQLITE_IOERR_CORRUPTFS (SQLITE_IOERR | (33<<8))
#define SQLITE_IOERR_IN_PAGE (SQLITE_IOERR | (34<<8))
#define SQLITE_LOCKED_SHAREDCACHE (SQLITE_LOCKED | (1<<8))
#define SQLITE_LOCKED_VTAB (SQLITE_LOCKED | (2<<8))
#define SQLITE_BUSY_RECOVERY (SQLITE_BUSY | (1<<8))
@ -1191,7 +1192,7 @@ struct sqlite3_io_methods {
** by clients within the current process, only within other processes.
**
** <li>[[SQLITE_FCNTL_CKSM_FILE]]
** The [SQLITE_FCNTL_CKSM_FILE] opcode is for use interally by the
** The [SQLITE_FCNTL_CKSM_FILE] opcode is for use internally by the
** [checksum VFS shim] only.
**
** <li>[[SQLITE_FCNTL_RESET_CACHE]]
@ -2127,7 +2128,7 @@ struct sqlite3_mem_methods {
** is stored in each sorted record and the required column values loaded
** from the database as records are returned in sorted order. The default
** value for this option is to never use this optimization. Specifying a
** negative value for this option restores the default behaviour.
** negative value for this option restores the default behavior.
** This option is only available if SQLite is compiled with the
** [SQLITE_ENABLE_SORTER_REFERENCES] compile-time option.
**
@ -2302,7 +2303,7 @@ struct sqlite3_mem_methods {
** database handle, SQLite checks if this will mean that there are now no
** connections at all to the database. If so, it performs a checkpoint
** operation before closing the connection. This option may be used to
** override this behaviour. The first parameter passed to this operation
** override this behavior. The first parameter passed to this operation
** is an integer - positive to disable checkpoints-on-close, or zero (the
** default) to enable them, and negative to leave the setting unchanged.
** The second parameter is a pointer to an integer
@ -2455,7 +2456,7 @@ struct sqlite3_mem_methods {
** the [VACUUM] command will fail with an obscure error when attempting to
** process a table with generated columns and a descending index. This is
** not considered a bug since SQLite versions 3.3.0 and earlier do not support
** either generated columns or decending indexes.
** either generated columns or descending indexes.
** </dd>
**
** [[SQLITE_DBCONFIG_STMT_SCANSTATUS]]
@ -2736,6 +2737,7 @@ SQLITE_API sqlite3_int64 sqlite3_total_changes64(sqlite3*);
**
** ^The [sqlite3_is_interrupted(D)] interface can be used to determine whether
** or not an interrupt is currently in effect for [database connection] D.
** It returns 1 if an interrupt is currently in effect, or 0 otherwise.
*/
SQLITE_API void sqlite3_interrupt(sqlite3*);
SQLITE_API int sqlite3_is_interrupted(sqlite3*);
@ -3389,8 +3391,10 @@ SQLITE_API SQLITE_DEPRECATED void *sqlite3_profile(sqlite3*,
** M argument should be the bitwise OR-ed combination of
** zero or more [SQLITE_TRACE] constants.
**
** ^Each call to either sqlite3_trace() or sqlite3_trace_v2() overrides
** (cancels) any prior calls to sqlite3_trace() or sqlite3_trace_v2().
** ^Each call to either sqlite3_trace(D,X,P) or sqlite3_trace_v2(D,M,X,P)
** overrides (cancels) all prior calls to sqlite3_trace(D,X,P) or
** sqlite3_trace_v2(D,M,X,P) for the [database connection] D. Each
** database connection may have at most one trace callback.
**
** ^The X callback is invoked whenever any of the events identified by
** mask M occur. ^The integer return value from the callback is currently
@ -3759,7 +3763,7 @@ SQLITE_API int sqlite3_open_v2(
** as F) must be one of:
** <ul>
** <li> A database filename pointer created by the SQLite core and
** passed into the xOpen() method of a VFS implemention, or
** passed into the xOpen() method of a VFS implementation, or
** <li> A filename obtained from [sqlite3_db_filename()], or
** <li> A new filename constructed using [sqlite3_create_filename()].
** </ul>
@ -3872,7 +3876,7 @@ SQLITE_API sqlite3_file *sqlite3_database_file_object(const char*);
/*
** CAPI3REF: Create and Destroy VFS Filenames
**
** These interfces are provided for use by [VFS shim] implementations and
** These interfaces are provided for use by [VFS shim] implementations and
** are not useful outside of that context.
**
** The sqlite3_create_filename(D,J,W,N,P) allocates memory to hold a version of
@ -3952,6 +3956,7 @@ SQLITE_API void sqlite3_free_filename(sqlite3_filename);
**
** ^The sqlite3_errmsg() and sqlite3_errmsg16() return English-language
** text that describes the error, as either UTF-8 or UTF-16 respectively.
** (See how SQLite handles [invalid UTF] for exceptions to this rule.)
** ^(Memory to hold the error message string is managed internally.
** The application does not need to worry about freeing the result.
** However, the error string might be overwritten or deallocated by
@ -4419,6 +4424,41 @@ SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
*/
SQLITE_API int sqlite3_stmt_isexplain(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Change The EXPLAIN Setting For A Prepared Statement
** METHOD: sqlite3_stmt
**
** The sqlite3_stmt_explain(S,E) interface changes the EXPLAIN
** setting for [prepared statement] S. If E is zero, then S becomes
** a normal prepared statement. If E is 1, then S behaves as if
** its SQL text began with "[EXPLAIN]". If E is 2, then S behaves as if
** its SQL text began with "[EXPLAIN QUERY PLAN]".
**
** Calling sqlite3_stmt_explain(S,E) might cause S to be reprepared.
** SQLite tries to avoid a reprepare, but a reprepare might be necessary
** on the first transition into EXPLAIN or EXPLAIN QUERY PLAN mode.
**
** Because of the potential need to reprepare, a call to
** sqlite3_stmt_explain(S,E) will fail with SQLITE_ERROR if S cannot be
** reprepared because it was created using [sqlite3_prepare()] instead of
** the newer [sqlite3_prepare_v2()] or [sqlite3_prepare_v3()] interfaces and
** hence has no saved SQL text with which to reprepare.
**
** Changing the explain setting for a prepared statement does not change
** the original SQL text for the statement. Hence, if the SQL text originally
** began with EXPLAIN or EXPLAIN QUERY PLAN, but sqlite3_stmt_explain(S,0)
** is called to convert the statement into an ordinary statement, the EXPLAIN
** or EXPLAIN QUERY PLAN keywords will still appear in the sqlite3_sql(S)
** output, even though the statement now acts like a normal SQL statement.
**
** This routine returns SQLITE_OK if the explain mode is successfully
** changed, or an error code if the explain mode could not be changed.
** The explain mode cannot be changed while a statement is active.
** Hence, it is good practice to call [sqlite3_reset(S)]
** immediately prior to calling sqlite3_stmt_explain(S,E).
*/
SQLITE_API int sqlite3_stmt_explain(sqlite3_stmt *pStmt, int eMode);
/*
** CAPI3REF: Determine If A Prepared Statement Has Been Reset
** METHOD: sqlite3_stmt
@ -4582,7 +4622,7 @@ typedef struct sqlite3_context sqlite3_context;
** with it may be passed. ^It is called to dispose of the BLOB or string even
** if the call to the bind API fails, except the destructor is not called if
** the third parameter is a NULL pointer or the fourth parameter is negative.
** ^ (2) The special constant, [SQLITE_STATIC], may be passsed to indicate that
** ^ (2) The special constant, [SQLITE_STATIC], may be passed to indicate that
** the application remains responsible for disposing of the object. ^In this
** case, the object and the provided pointer to it must remain valid until
** either the prepared statement is finalized or the same SQL parameter is
@ -5261,20 +5301,33 @@ SQLITE_API int sqlite3_finalize(sqlite3_stmt *pStmt);
** ^The [sqlite3_reset(S)] interface resets the [prepared statement] S
** back to the beginning of its program.
**
** ^If the most recent call to [sqlite3_step(S)] for the
** [prepared statement] S returned [SQLITE_ROW] or [SQLITE_DONE],
** or if [sqlite3_step(S)] has never before been called on S,
** then [sqlite3_reset(S)] returns [SQLITE_OK].
** ^The return code from [sqlite3_reset(S)] indicates whether or not
** the previous evaluation of prepared statement S completed successfully.
** ^If [sqlite3_step(S)] has never before been called on S or if
** [sqlite3_step(S)] has not been called since the previous call
** to [sqlite3_reset(S)], then [sqlite3_reset(S)] will return
** [SQLITE_OK].
**
** ^If the most recent call to [sqlite3_step(S)] for the
** [prepared statement] S indicated an error, then
** [sqlite3_reset(S)] returns an appropriate [error code].
** ^The [sqlite3_reset(S)] interface might also return an [error code]
** if there were no prior errors but the process of resetting
** the prepared statement caused a new error. ^For example, if an
** [INSERT] statement with a [RETURNING] clause is only stepped one time,
** that one call to [sqlite3_step(S)] might return SQLITE_ROW but
** the overall statement might still fail and the [sqlite3_reset(S)] call
** might return SQLITE_BUSY if locking constraints prevent the
** database change from committing. Therefore, it is important that
** applications check the return code from [sqlite3_reset(S)] even if
** no prior call to [sqlite3_step(S)] indicated a problem.
**
** ^The [sqlite3_reset(S)] interface does not change the values
** of any [sqlite3_bind_blob|bindings] on the [prepared statement] S.
*/
SQLITE_API int sqlite3_reset(sqlite3_stmt *pStmt);
/*
** CAPI3REF: Create Or Redefine SQL Functions
** KEYWORDS: {function creation routines}
@ -5485,7 +5538,7 @@ SQLITE_API int sqlite3_create_window_function(
** [application-defined SQL function]
** that has side-effects or that could potentially leak sensitive information.
** This will prevent attacks in which an application is tricked
** into using a database file that has had its schema surreptiously
** into using a database file that has had its schema surreptitiously
** modified to invoke the application-defined function in ways that are
** harmful.
** <p>
@ -5829,32 +5882,32 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
** METHOD: sqlite3_context
**
** These functions may be used by (non-aggregate) SQL functions to
** associate metadata with argument values. If the same value is passed to
** multiple invocations of the same SQL function during query execution, under
** some circumstances the associated metadata may be preserved. An example
** of where this might be useful is in a regular-expression matching
** function. The compiled version of the regular expression can be stored as
** metadata associated with the pattern string.
** associate auxiliary data with argument values. If the same argument
** value is passed to multiple invocations of the same SQL function during
** query execution, under some circumstances the associated auxiliary data
** might be preserved. An example of where this might be useful is in a
** regular-expression matching function. The compiled version of the regular
** expression can be stored as auxiliary data associated with the pattern string.
** Then as long as the pattern string remains the same,
** the compiled regular expression can be reused on multiple
** invocations of the same function.
**
** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the metadata
** ^The sqlite3_get_auxdata(C,N) interface returns a pointer to the auxiliary data
** associated by the sqlite3_set_auxdata(C,N,P,X) function with the Nth argument
** value to the application-defined function. ^N is zero for the left-most
** function argument. ^If there is no metadata
** function argument. ^If there is no auxiliary data
** associated with the function argument, the sqlite3_get_auxdata(C,N) interface
** returns a NULL pointer.
**
** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as metadata for the N-th
** argument of the application-defined function. ^Subsequent
** ^The sqlite3_set_auxdata(C,N,P,X) interface saves P as auxiliary data for the
** N-th argument of the application-defined function. ^Subsequent
** calls to sqlite3_get_auxdata(C,N) return P from the most recent
** sqlite3_set_auxdata(C,N,P,X) call if the metadata is still valid or
** NULL if the metadata has been discarded.
** sqlite3_set_auxdata(C,N,P,X) call if the auxiliary data is still valid or
** NULL if the auxiliary data has been discarded.
** ^After each call to sqlite3_set_auxdata(C,N,P,X) where X is not NULL,
** SQLite will invoke the destructor function X with parameter P exactly
** once, when the metadata is discarded.
** SQLite is free to discard the metadata at any time, including: <ul>
** once, when the auxiliary data is discarded.
** SQLite is free to discard the auxiliary data at any time, including: <ul>
** <li> ^(when the corresponding function parameter changes)^, or
** <li> ^(when [sqlite3_reset()] or [sqlite3_finalize()] is called for the
** SQL statement)^, or
@ -5870,7 +5923,7 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
** function implementation should not make any use of P after
** sqlite3_set_auxdata() has been called.
**
** ^(In practice, metadata is preserved between function calls for
** ^(In practice, auxiliary data is preserved between function calls for
** function parameters that are compile-time constants, including literal
** values and [parameters] and expressions composed from the same.)^
**
@ -5880,10 +5933,67 @@ SQLITE_API sqlite3 *sqlite3_context_db_handle(sqlite3_context*);
**
** These routines must be called from the same thread in which
** the SQL function is running.
**
** See also: [sqlite3_get_clientdata()] and [sqlite3_set_clientdata()].
*/
SQLITE_API void *sqlite3_get_auxdata(sqlite3_context*, int N);
SQLITE_API void sqlite3_set_auxdata(sqlite3_context*, int N, void*, void (*)(void*));
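/*
** Editor's sketch (not part of the original header): the regular-expression
** cache described above, using POSIX <regex.h> purely for illustration.
** regexp(pattern, subject) compiles the pattern once per distinct pattern
** value and stores it as auxiliary data on argument 0.
*/
static void regexpFree(void *p){
  regfree((regex_t*)p);
  sqlite3_free(p);
}
static void regexpFunc(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  const char *zPat = (const char*)sqlite3_value_text(argv[0]);
  const char *zStr = (const char*)sqlite3_value_text(argv[1]);
  regex_t *pRe = (regex_t*)sqlite3_get_auxdata(ctx, 0);
  (void)argc;
  if( zPat==0 || zStr==0 ) return;                 /* NULL in, NULL out */
  if( pRe==0 ){
    pRe = (regex_t*)sqlite3_malloc(sizeof(*pRe));
    if( pRe==0 || regcomp(pRe, zPat, REG_EXTENDED|REG_NOSUB)!=0 ){
      sqlite3_free(pRe);
      sqlite3_result_error(ctx, "invalid pattern", -1);
      return;
    }
    sqlite3_set_auxdata(ctx, 0, pRe, regexpFree);
    pRe = (regex_t*)sqlite3_get_auxdata(ctx, 0);   /* may already be discarded */
    if( pRe==0 ){ sqlite3_result_error_nomem(ctx); return; }
  }
  sqlite3_result_int(ctx, regexec(pRe, zStr, 0, 0, 0)==0);
}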
/*
** CAPI3REF: Database Connection Client Data
** METHOD: sqlite3
**
** These functions are used to associate one or more named pointers
** with a [database connection].
** A call to sqlite3_set_clientdata(D,N,P,X) causes the pointer P
** to be attached to [database connection] D using name N. Subsequent
** calls to sqlite3_get_clientdata(D,N) will return a copy of pointer P
** or a NULL pointer if there were no prior calls to
** sqlite3_set_clientdata() with the same values of D and N.
** Names are compared using strcmp() and are thus case sensitive.
**
** If P and X are both non-NULL, then the destructor X is invoked with
** argument P on the first of the following occurrences:
** <ul>
** <li> An out-of-memory error occurs during the call to
** sqlite3_set_clientdata() which attempts to register pointer P.
** <li> A subsequent call to sqlite3_set_clientdata(D,N,P,X) is made
** with the same D and N parameters.
** <li> The database connection closes. SQLite does not make any guarantees
** about the order in which destructors are called, only that all
** destructors will be called exactly once at some point during the
** database connection closing process.
** </ul>
**
** SQLite does not do anything with client data other than invoke
** destructors on the client data at the appropriate time. The intended
** use for client data is to provide a mechanism for wrapper libraries
** to store additional information about an SQLite database connection.
**
** There is no limit (other than available memory) on the number of different
** client data pointers (with different names) that can be attached to a
** single database connection. However, the implementation is optimized
** for the case of having only one or two different client data names.
** Applications and wrapper libraries are discouraged from using more than
** one client data name each.
**
** There is no way to enumerate the client data pointers
** associated with a database connection. The N parameter can be thought
** of as a secret key such that only code that knows the secret key is able
** to access the associated data.
**
** Security Warning: These interfaces should not be exposed in scripting
** languages or in other circumstances where it might be possible for an
** attacker to invoke them. Any agent that can invoke these interfaces
** can probably also take control of the process.
**
** Database connection client data is only available for SQLite
** version 3.44.0 ([dateof:3.44.0]) and later.
**
** See also: [sqlite3_set_auxdata()] and [sqlite3_get_auxdata()].
*/
SQLITE_API void *sqlite3_get_clientdata(sqlite3*,const char*);
SQLITE_API int sqlite3_set_clientdata(sqlite3*, const char*, void*, void(*)(void*));
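/*
** Editor's sketch (not part of the original header): a wrapper library
** attaching per-connection state under a private name, as suggested above.
** wrapper_state, the reverse-DNS name, and sqlite3_malloc()-based
** allocation are illustrative assumptions.
*/
typedef struct wrapper_state wrapper_state;
static void wrapperStateFree(void *p){ sqlite3_free(p); }  /* destructor X */

static int wrapperAttach(sqlite3 *db, wrapper_state *pState){
  /* A later call with the same name invokes wrapperStateFree on the old P */
  return sqlite3_set_clientdata(db, "com.example.wrapper", pState, wrapperStateFree);
}
static wrapper_state *wrapperLookup(sqlite3 *db){
  return (wrapper_state*)sqlite3_get_clientdata(db, "com.example.wrapper");
}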
/*
** CAPI3REF: Constants Defining Special Destructor Behavior
@ -6516,7 +6626,7 @@ SQLITE_API int sqlite3_db_readonly(sqlite3 *db, const char *zDbName);
SQLITE_API int sqlite3_txn_state(sqlite3*,const char *zSchema);
/*
** CAPI3REF: Allowed return values from [sqlite3_txn_state()]
** CAPI3REF: Allowed return values from sqlite3_txn_state()
** KEYWORDS: {transaction state}
**
** These constants define the current transaction state of a database file.
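/*
** Editor's sketch (not part of the original header): querying the
** transaction state of the "main" schema.
*/
static int in_write_txn(sqlite3 *db){
  return sqlite3_txn_state(db, "main")==SQLITE_TXN_WRITE;
}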
@ -6648,7 +6758,7 @@ SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
** ^Each call to the sqlite3_autovacuum_pages() interface overrides all
** previous invocations for that database connection. ^If the callback
** argument (C) to sqlite3_autovacuum_pages(D,C,P,X) is a NULL pointer,
** then the autovacuum steps callback is cancelled. The return value
** then the autovacuum steps callback is canceled. The return value
** from sqlite3_autovacuum_pages() is normally SQLITE_OK, but might
** be some other error code if something goes wrong. The current
** implementation will only return SQLITE_OK or SQLITE_MISUSE, but other
@ -7167,6 +7277,10 @@ struct sqlite3_module {
/* The methods above are in versions 1 and 2 of the sqlite_module object.
** Those below are for version 3 and greater. */
int (*xShadowName)(const char*);
/* The methods above are in versions 1 through 3 of the sqlite_module object.
** Those below are for version 4 and greater. */
int (*xIntegrity)(sqlite3_vtab *pVTab, const char *zSchema,
const char *zTabName, int mFlags, char **pzErr);
};
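/*
** Editor's sketch (not part of the original header): the shape of a
** version-4 xIntegrity callback. A real implementation would verify the
** virtual table's backing store and set *pzErr (e.g. via sqlite3_mprintf())
** when corruption is found.
*/
static int vtabIntegrity(sqlite3_vtab *pVTab, const char *zSchema,
                         const char *zTabName, int mFlags, char **pzErr){
  (void)pVTab; (void)zSchema; (void)zTabName; (void)mFlags;
  *pzErr = 0;               /* leave unset: content is consistent */
  return SQLITE_OK;
}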
/*
@ -7654,7 +7768,7 @@ SQLITE_API int sqlite3_blob_reopen(sqlite3_blob *, sqlite3_int64);
** code is returned and the transaction rolled back.
**
** Calling this function with an argument that is not a NULL pointer or an
** open blob handle results in undefined behaviour. ^Calling this routine
** open blob handle results in undefined behavior. ^Calling this routine
** with a null pointer (such as would be returned by a failed call to
** [sqlite3_blob_open()]) is a harmless no-op. ^Otherwise, if this function
** is passed a valid open blob handle, the values returned by the
@ -8134,6 +8248,7 @@ SQLITE_API int sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_PRNG_SAVE 5
#define SQLITE_TESTCTRL_PRNG_RESTORE 6
#define SQLITE_TESTCTRL_PRNG_RESET 7 /* NOT USED */
#define SQLITE_TESTCTRL_FK_NO_ACTION 7
#define SQLITE_TESTCTRL_BITVEC_TEST 8
#define SQLITE_TESTCTRL_FAULT_INSTALL 9
#define SQLITE_TESTCTRL_BENIGN_MALLOC_HOOKS 10
@ -8162,7 +8277,8 @@ SQLITE_API int sqlite3_test_control(int op, ...);
#define SQLITE_TESTCTRL_TRACEFLAGS 31
#define SQLITE_TESTCTRL_TUNE 32
#define SQLITE_TESTCTRL_LOGEST 33
#define SQLITE_TESTCTRL_LAST 33 /* Largest TESTCTRL */
#define SQLITE_TESTCTRL_USELONGDOUBLE 34
#define SQLITE_TESTCTRL_LAST 34 /* Largest TESTCTRL */
/*
** CAPI3REF: SQL Keyword Checking
@ -9618,7 +9734,7 @@ SQLITE_API int sqlite3_vtab_config(sqlite3*, int op, ...);
** [[SQLITE_VTAB_DIRECTONLY]]<dt>SQLITE_VTAB_DIRECTONLY</dt>
** <dd>Calls of the form
** [sqlite3_vtab_config](db,SQLITE_VTAB_DIRECTONLY) from within
** the [xConnect] or [xCreate] methods of a [virtual table] implmentation
** the [xConnect] or [xCreate] methods of a [virtual table] implementation
** prohibits that virtual table from being used from within triggers and
** views.
** </dd>
@ -9808,7 +9924,7 @@ SQLITE_API int sqlite3_vtab_distinct(sqlite3_index_info*);
** communicated to the xBestIndex method as a
** [SQLITE_INDEX_CONSTRAINT_EQ] constraint.)^ If xBestIndex wants to use
** this constraint, it must set the corresponding
** aConstraintUsage[].argvIndex to a postive integer. ^(Then, under
** aConstraintUsage[].argvIndex to a positive integer. ^(Then, under
** the usual mode of handling IN operators, SQLite generates [bytecode]
** that invokes the [xFilter|xFilter() method] once for each value
** on the right-hand side of the IN operator.)^ Thus the virtual table
@ -10237,7 +10353,7 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
** When the [sqlite3_blob_write()] API is used to update a blob column,
** the pre-update hook is invoked with SQLITE_DELETE. This is because
** in this case the new values are not available. In this case, when a
** callback made with op==SQLITE_DELETE is actuall a write using the
** callback made with op==SQLITE_DELETE is actually a write using the
** sqlite3_blob_write() API, the [sqlite3_preupdate_blobwrite()] returns
** the index of the column being written. In other cases, where the
** pre-update hook is being invoked for some other reason, including a
@ -10498,6 +10614,13 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const c
** SQLITE_SERIALIZE_NOCOPY bit is set but no contiguous copy
** of the database exists.
**
** After the call, if the SQLITE_SERIALIZE_NOCOPY bit had been set,
** the returned buffer content will remain accessible and unchanged
** until either the next write operation on the connection or until
** the connection is closed, and applications must not modify the
** buffer. If the bit had been clear, the returned buffer will not
** be accessed by SQLite after the call.
**
** A call to sqlite3_serialize(D,S,P,F) might return NULL even if the
** SQLITE_SERIALIZE_NOCOPY bit is omitted from argument F if a memory
** allocation error occurs.
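/*
** Editor's sketch (not part of the original header): honoring the buffer
** lifetimes described above. Try the zero-copy path first, then fall back
** to a heap copy that the caller must release with sqlite3_free().
*/
static const unsigned char *db_image(sqlite3 *db, sqlite3_int64 *pSz, int *pMustFree){
  unsigned char *p = sqlite3_serialize(db, "main", pSz, SQLITE_SERIALIZE_NOCOPY);
  *pMustFree = 0;
  if( p==0 ){                            /* no contiguous in-memory copy */
    p = sqlite3_serialize(db, "main", pSz, 0);
    *pMustFree = (p!=0);
  }
  return p;
}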
@ -10546,6 +10669,9 @@ SQLITE_API unsigned char *sqlite3_serialize(
** SQLite will try to increase the buffer size using sqlite3_realloc64()
** if writes on the database cause it to grow larger than M bytes.
**
** Applications must not modify the buffer P or invalidate it before
** the database connection D is closed.
**
** The sqlite3_deserialize() interface will fail with SQLITE_BUSY if the
** database is currently in a read transaction or is involved in a backup
** operation.
@ -10554,6 +10680,13 @@ SQLITE_API unsigned char *sqlite3_serialize(
** S argument to sqlite3_deserialize(D,S,P,N,M,F) is "temp" then the
** function returns SQLITE_ERROR.
**
** The deserialized database should not be in [WAL mode]. If the database
** is in WAL mode, then any attempt to use the database file will result
** in an [SQLITE_CANTOPEN] error. The application can set the
** [file format version numbers] (bytes 18 and 19) of the input database P
** to 0x01 prior to invoking sqlite3_deserialize(D,S,P,N,M,F) to force the
** database file into rollback mode and work around this limitation.
**
** If sqlite3_deserialize(D,S,P,N,M,F) fails for any reason and if the
** SQLITE_DESERIALIZE_FREEONCLOSE bit is set in argument F, then
** [sqlite3_free()] is invoked on argument P prior to returning.
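/*
** Editor's sketch (not part of the original header): the WAL-mode
** workaround described above. Assumes buf holds a complete database image
** of sz bytes obtained from sqlite3_malloc64(), as FREEONCLOSE requires.
*/
static int deserialize_rollback(sqlite3 *db, unsigned char *buf, sqlite3_int64 sz){
  if( sz>=20 ){
    buf[18] = 0x01;                      /* file format version numbers: */
    buf[19] = 0x01;                      /* force rollback mode          */
  }
  return sqlite3_deserialize(db, "main", buf, sz, sz,
             SQLITE_DESERIALIZE_FREEONCLOSE|SQLITE_DESERIALIZE_RESIZEABLE);
}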
@ -11626,6 +11759,18 @@ SQLITE_API int sqlite3changeset_concat(
);
/*
** CAPI3REF: Upgrade the Schema of a Changeset/Patchset
*/
SQLITE_API int sqlite3changeset_upgrade(
sqlite3 *db,
const char *zDb,
int nIn, const void *pIn, /* Input changeset */
int *pnOut, void **ppOut /* OUT: Upgraded changeset */
);
/*
** CAPI3REF: Changegroup Handle
**
@ -11672,6 +11817,38 @@ typedef struct sqlite3_changegroup sqlite3_changegroup;
*/
SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp);
/*
** CAPI3REF: Add a Schema to a Changegroup
** METHOD: sqlite3_changegroup_schema
**
** This method may be used to optionally enforce the rule that the changesets
** added to the changegroup handle must match the schema of database zDb
** ("main", "temp", or the name of an attached database). If
** sqlite3changegroup_add() is called to add a changeset that is not compatible
** with the configured schema, SQLITE_SCHEMA is returned and the changegroup
** object is left in an undefined state.
**
** A changeset schema is considered compatible with the database schema in
** the same way as for sqlite3changeset_apply(). Specifically, for each
** table in the changeset, there exists a database table with:
**
** <ul>
** <li> The name identified by the changeset, and
** <li> at least as many columns as recorded in the changeset, and
** <li> the primary key columns in the same position as recorded in
** the changeset.
** </ul>
**
** The output of the changegroup object always has the same schema as the
** database nominated using this function. In cases where changesets passed
** to sqlite3changegroup_add() have fewer columns than the corresponding table
** in the database schema, these are filled in using the default column
** values from the database schema. This makes it possible to combine
** changesets that have different numbers of columns for a single table
** within a changegroup, provided that they are otherwise compatible.
*/
SQLITE_API int sqlite3changegroup_schema(sqlite3_changegroup*, sqlite3*, const char *zDb);
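/*
** Editor's sketch (not part of the original header): pinning a changegroup
** to the "main" schema before combining two changesets, per the text above.
** Error handling is abbreviated.
*/
static int combine_changesets(sqlite3 *db, int n1, void *p1, int n2, void *p2,
                              int *pnOut, void **ppOut){
  sqlite3_changegroup *pGrp = 0;
  int rc = sqlite3changegroup_new(&pGrp);
  if( rc==SQLITE_OK ) rc = sqlite3changegroup_schema(pGrp, db, "main");
  if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, n1, p1);
  if( rc==SQLITE_OK ) rc = sqlite3changegroup_add(pGrp, n2, p2);
  if( rc==SQLITE_OK ) rc = sqlite3changegroup_output(pGrp, pnOut, ppOut);
  sqlite3changegroup_delete(pGrp);
  return rc;
}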
/*
** CAPI3REF: Add A Changeset To A Changegroup
** METHOD: sqlite3_changegroup
@ -11740,13 +11917,18 @@ SQLITE_API int sqlite3changegroup_new(sqlite3_changegroup **pp);
** If the new changeset contains changes to a table that is already present
** in the changegroup, then the number of columns and the position of the
** primary key columns for the table must be consistent. If this is not the
** case, this function fails with SQLITE_SCHEMA. If the input changeset
** appears to be corrupt and the corruption is detected, SQLITE_CORRUPT is
** returned. Or, if an out-of-memory condition occurs during processing, this
** function returns SQLITE_NOMEM. In all cases, if an error occurs the state
** of the final contents of the changegroup is undefined.
** case, this function fails with SQLITE_SCHEMA. Except, if the changegroup
** object has been configured with a database schema using the
** sqlite3changegroup_schema() API, then it is possible to combine changesets
** with different numbers of columns for a single table, provided that
** they are otherwise compatible.
**
** If no error occurs, SQLITE_OK is returned.
** If the input changeset appears to be corrupt and the corruption is
** detected, SQLITE_CORRUPT is returned. Or, if an out-of-memory condition
** occurs during processing, this function returns SQLITE_NOMEM.
**
** In all cases, if an error occurs the state of the final contents of the
** changegroup is undefined. If no error occurs, SQLITE_OK is returned.
*/
SQLITE_API int sqlite3changegroup_add(sqlite3_changegroup*, int nData, void *pData);
@ -12011,10 +12193,17 @@ SQLITE_API int sqlite3changeset_apply_v2(
** <li>an insert change if all fields of the conflicting row match
** the row being inserted.
** </ul>
**
** <dt>SQLITE_CHANGESETAPPLY_FKNOACTION <dd>
** If this flag is set, then all foreign key constraints in the target
** database behave as if they were declared with "ON UPDATE NO ACTION ON
** DELETE NO ACTION", even if they are actually CASCADE, RESTRICT, SET NULL
** or SET DEFAULT.
*/
#define SQLITE_CHANGESETAPPLY_NOSAVEPOINT 0x0001
#define SQLITE_CHANGESETAPPLY_INVERT 0x0002
#define SQLITE_CHANGESETAPPLY_IGNORENOOP 0x0004
#define SQLITE_CHANGESETAPPLY_FKNOACTION 0x0008
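/*
** Editor's sketch (not part of the original header): applying a changeset
** with foreign key actions suppressed via the new flag. xConflict is an
** application-supplied conflict handler.
*/
static int apply_no_fk_actions(sqlite3 *db, int nChangeset, void *pChangeset,
    int (*xConflict)(void*, int, sqlite3_changeset_iter*)){
  return sqlite3changeset_apply_v2(db, nChangeset, pChangeset,
      0 /* xFilter */, xConflict, 0 /* pCtx */,
      0 /* ppRebase */, 0 /* pnRebase */,
      SQLITE_CHANGESETAPPLY_FKNOACTION);
}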
/*
** CAPI3REF: Constants Passed To The Conflict Handler
@ -12755,7 +12944,7 @@ struct Fts5PhraseIter {
** See xPhraseFirstColumn above.
*/
struct Fts5ExtensionApi {
int iVersion; /* Currently always set to 3 */
int iVersion; /* Currently always set to 2 */
void *(*xUserData)(Fts5Context*);
@ -12984,8 +13173,8 @@ struct Fts5ExtensionApi {
** as separate queries of the FTS index are required for each synonym.
**
** When using methods (2) or (3), it is important that the tokenizer only
** provide synonyms when tokenizing document text (method (2)) or query
** text (method (3)), not both. Doing so will not cause any errors, but is
** provide synonyms when tokenizing document text (method (3)) or query
** text (method (2)), not both. Doing so will not cause any errors, but is
** inefficient.
*/
typedef struct Fts5Tokenizer Fts5Tokenizer;
@ -13033,7 +13222,7 @@ struct fts5_api {
int (*xCreateTokenizer)(
fts5_api *pApi,
const char *zName,
void *pContext,
void *pUserData,
fts5_tokenizer *pTokenizer,
void (*xDestroy)(void*)
);
@ -13042,7 +13231,7 @@ struct fts5_api {
int (*xFindTokenizer)(
fts5_api *pApi,
const char *zName,
void **ppContext,
void **ppUserData,
fts5_tokenizer *pTokenizer
);
@ -13050,7 +13239,7 @@ struct fts5_api {
int (*xCreateFunction)(
fts5_api *pApi,
const char *zName,
void *pContext,
void *pUserData,
fts5_extension_function xFunction,
void (*xDestroy)(void*)
);

View File

@ -366,6 +366,11 @@ struct sqlite3_api_routines {
int (*value_encoding)(sqlite3_value*);
/* Version 3.41.0 and later */
int (*is_interrupted)(sqlite3*);
/* Version 3.43.0 and later */
int (*stmt_explain)(sqlite3_stmt*,int);
/* Version 3.44.0 and later */
void *(*get_clientdata)(sqlite3*,const char*);
int (*set_clientdata)(sqlite3*, const char*, void*, void(*)(void*));
};
/*
@ -694,6 +699,11 @@ typedef int (*sqlite3_loadext_entry)(
#define sqlite3_value_encoding sqlite3_api->value_encoding
/* Version 3.41.0 and later */
#define sqlite3_is_interrupted sqlite3_api->is_interrupted
/* Version 3.43.0 and later */
#define sqlite3_stmt_explain sqlite3_api->stmt_explain
/* Version 3.44.0 and later */
#define sqlite3_get_clientdata sqlite3_api->get_clientdata
#define sqlite3_set_clientdata sqlite3_api->set_clientdata
#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)

View File

@ -26,7 +26,7 @@ import (
"errors"
"fmt"
"github.com/theupdateframework/go-tuf/encrypted"
"github.com/secure-systems-lab/go-securesystemslib/encrypted"
)
const (

View File

@ -27,7 +27,7 @@ import (
const CosignSignatureType = "cosign container image signature"
// SimpleContainerImage describes the structure of a basic container image signature payload, as defined at:
// https://github.com/containers/image/blob/master/docs/containers-signature.5.md#json-data-format
// https://github.com/containers/image/blob/main/docs/containers-signature.5.md#json-data-format
type SimpleContainerImage struct {
Critical Critical `json:"critical"` // Critical data critical to correctly evaluating the validity of the signature
Optional map[string]interface{} `json:"optional"` // Optional optional metadata about the image

View File

@ -8,6 +8,7 @@
package sif
import (
"encoding"
"encoding/binary"
"errors"
"fmt"
@ -321,51 +322,6 @@ func CreateContainerAtPath(path string, opts ...CreateOpt) (*FileImage, error) {
return f, nil
}
func zeroData(fimg *FileImage, descr *rawDescriptor) error {
// first, move to data object offset
if _, err := fimg.rw.Seek(descr.Offset, io.SeekStart); err != nil {
return err
}
var zero [4096]byte
n := descr.Size
upbound := int64(4096)
for {
if n < 4096 {
upbound = n
}
if _, err := fimg.rw.Write(zero[:upbound]); err != nil {
return err
}
n -= 4096
if n <= 0 {
break
}
}
return nil
}
func resetDescriptor(fimg *FileImage, index int) error {
// If we remove the primary partition, set the global header Arch field to HdrArchUnknown
// to indicate that the SIF file doesn't include a primary partition and no dependency
// on any architecture exists.
if fimg.rds[index].isPartitionOfType(PartPrimSys) {
fimg.h.Arch = hdrArchUnknown
}
offset := fimg.h.DescriptorsOffset + int64(index)*int64(binary.Size(fimg.rds[0]))
// first, move to descriptor offset
if _, err := fimg.rw.Seek(offset, io.SeekStart); err != nil {
return err
}
var emptyDesc rawDescriptor
return binary.Write(fimg.rw, binary.LittleEndian, emptyDesc)
}
// addOpts accumulates object add options.
type addOpts struct {
t time.Time
@ -447,6 +403,26 @@ func (f *FileImage) isLast(d *rawDescriptor) bool {
return isLast
}
// zeroReader is an io.Reader that returns a stream of zero-bytes.
type zeroReader struct{}
func (zeroReader) Read(b []byte) (int, error) {
for i := range b {
b[i] = 0
}
return len(b), nil
}
// zero overwrites the data object described by d with a stream of zero bytes.
func (f *FileImage) zero(d *rawDescriptor) error {
if _, err := f.rw.Seek(d.Offset, io.SeekStart); err != nil {
return err
}
_, err := io.CopyN(f.rw, zeroReader{}, d.Size)
return err
}
// truncateAt truncates f at the start of the padded data object described by d.
func (f *FileImage) truncateAt(d *rawDescriptor) error {
start := d.Offset + d.Size - d.SizeWithPadding
@ -530,7 +506,7 @@ func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error {
}
if do.zero {
if err := zeroData(f, d); err != nil {
if err := f.zero(d); err != nil {
return fmt.Errorf("%w", err)
}
}
@ -546,15 +522,17 @@ func (f *FileImage) DeleteObject(id uint32, opts ...DeleteOpt) error {
f.h.DescriptorsFree++
f.h.ModifiedAt = do.t.Unix()
index := 0
for i, od := range f.rds {
if od.ID == id {
index = i
break
}
// If we remove the primary partition, set the global header Arch field to HdrArchUnknown
// to indicate that the SIF file doesn't include a primary partition and no dependency
// on any architecture exists.
if d.isPartitionOfType(PartPrimSys) {
f.h.Arch = hdrArchUnknown
}
if err := resetDescriptor(f, index); err != nil {
// Reset rawDescriptor to an empty struct
*d = rawDescriptor{}
if err := f.writeDescriptors(); err != nil {
return fmt.Errorf("%w", err)
}
@ -676,3 +654,45 @@ func (f *FileImage) SetPrimPart(id uint32, opts ...SetOpt) error {
return nil
}
// SetMetadata sets the metadata of the data object with id to md, according to opts.
//
// By default, the image/object modification times are set to the current time for
// non-deterministic images, and unset otherwise. To override this, consider using
// OptSetDeterministic or OptSetWithTime.
func (f *FileImage) SetMetadata(id uint32, md encoding.BinaryMarshaler, opts ...SetOpt) error {
so := setOpts{}
if !f.isDeterministic() {
so.t = time.Now()
}
for _, opt := range opts {
if err := opt(&so); err != nil {
return fmt.Errorf("%w", err)
}
}
rd, err := f.getDescriptor(WithID(id))
if err != nil {
return fmt.Errorf("%w", err)
}
if err := rd.setExtra(md); err != nil {
return fmt.Errorf("%w", err)
}
rd.ModifiedAt = so.t.Unix()
if err := f.writeDescriptors(); err != nil {
return fmt.Errorf("%w", err)
}
f.h.ModifiedAt = so.t.Unix()
if err := f.writeHeader(); err != nil {
return fmt.Errorf("%w", err)
}
return nil
}

View File

@ -1,27 +0,0 @@
Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Prime Directive, Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,226 +0,0 @@
// Package encrypted provides a simple, secure system for encrypting data
// symmetrically with a passphrase.
//
// It uses scrypt to derive a key from the passphrase and the NaCl secret box
// cipher for authenticated encryption.
package encrypted
import (
"crypto/rand"
"encoding/json"
"errors"
"fmt"
"io"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/scrypt"
)
const saltSize = 32
const (
boxKeySize = 32
boxNonceSize = 24
)
const (
// N parameter was chosen to be ~100ms of work using the default implementation
// on the 2.3GHz Core i7 Haswell processor in a late-2013 Apple Retina Macbook
// Pro (it takes ~113ms).
scryptN = 32768
scryptR = 8
scryptP = 1
)
const (
nameScrypt = "scrypt"
nameSecretBox = "nacl/secretbox"
)
type data struct {
KDF scryptKDF `json:"kdf"`
Cipher secretBoxCipher `json:"cipher"`
Ciphertext []byte `json:"ciphertext"`
}
type scryptParams struct {
N int `json:"N"`
R int `json:"r"`
P int `json:"p"`
}
func newScryptKDF() (scryptKDF, error) {
salt := make([]byte, saltSize)
if err := fillRandom(salt); err != nil {
return scryptKDF{}, err
}
return scryptKDF{
Name: nameScrypt,
Params: scryptParams{
N: scryptN,
R: scryptR,
P: scryptP,
},
Salt: salt,
}, nil
}
type scryptKDF struct {
Name string `json:"name"`
Params scryptParams `json:"params"`
Salt []byte `json:"salt"`
}
func (s *scryptKDF) Key(passphrase []byte) ([]byte, error) {
return scrypt.Key(passphrase, s.Salt, s.Params.N, s.Params.R, s.Params.P, boxKeySize)
}
// CheckParams checks that the encoded KDF parameters are what we expect them to
// be. If we do not do this, an attacker could cause a DoS by tampering with
// them.
func (s *scryptKDF) CheckParams() error {
if s.Params.N != scryptN || s.Params.R != scryptR || s.Params.P != scryptP {
return errors.New("encrypted: unexpected kdf parameters")
}
return nil
}
func newSecretBoxCipher() (secretBoxCipher, error) {
nonce := make([]byte, boxNonceSize)
if err := fillRandom(nonce); err != nil {
return secretBoxCipher{}, err
}
return secretBoxCipher{
Name: nameSecretBox,
Nonce: nonce,
}, nil
}
type secretBoxCipher struct {
Name string `json:"name"`
Nonce []byte `json:"nonce"`
encrypted bool
}
func (s *secretBoxCipher) Encrypt(plaintext, key []byte) []byte {
var keyBytes [boxKeySize]byte
var nonceBytes [boxNonceSize]byte
if len(key) != len(keyBytes) {
panic("incorrect key size")
}
if len(s.Nonce) != len(nonceBytes) {
panic("incorrect nonce size")
}
copy(keyBytes[:], key)
copy(nonceBytes[:], s.Nonce)
// ensure that we don't re-use nonces
if s.encrypted {
panic("Encrypt must only be called once for each cipher instance")
}
s.encrypted = true
return secretbox.Seal(nil, plaintext, &nonceBytes, &keyBytes)
}
func (s *secretBoxCipher) Decrypt(ciphertext, key []byte) ([]byte, error) {
var keyBytes [boxKeySize]byte
var nonceBytes [boxNonceSize]byte
if len(key) != len(keyBytes) {
panic("incorrect key size")
}
if len(s.Nonce) != len(nonceBytes) {
// return an error instead of panicking since the nonce is user input
return nil, errors.New("encrypted: incorrect nonce size")
}
copy(keyBytes[:], key)
copy(nonceBytes[:], s.Nonce)
res, ok := secretbox.Open(nil, ciphertext, &nonceBytes, &keyBytes)
if !ok {
return nil, errors.New("encrypted: decryption failed")
}
return res, nil
}
// Encrypt takes a passphrase and plaintext, and returns a JSON object
// containing ciphertext and the details necessary to decrypt it.
func Encrypt(plaintext, passphrase []byte) ([]byte, error) {
k, err := newScryptKDF()
if err != nil {
return nil, err
}
key, err := k.Key(passphrase)
if err != nil {
return nil, err
}
c, err := newSecretBoxCipher()
if err != nil {
return nil, err
}
data := &data{
KDF: k,
Cipher: c,
}
data.Ciphertext = c.Encrypt(plaintext, key)
return json.Marshal(data)
}
// Marshal encrypts the JSON encoding of v using passphrase.
func Marshal(v interface{}, passphrase []byte) ([]byte, error) {
data, err := json.MarshalIndent(v, "", "\t")
if err != nil {
return nil, err
}
return Encrypt(data, passphrase)
}
// Decrypt takes a JSON-encoded ciphertext object encrypted using Encrypt and
// tries to decrypt it using passphrase. If successful, it returns the
// plaintext.
func Decrypt(ciphertext, passphrase []byte) ([]byte, error) {
data := &data{}
if err := json.Unmarshal(ciphertext, data); err != nil {
return nil, err
}
if data.KDF.Name != nameScrypt {
return nil, fmt.Errorf("encrypted: unknown kdf name %q", data.KDF.Name)
}
if data.Cipher.Name != nameSecretBox {
return nil, fmt.Errorf("encrypted: unknown cipher name %q", data.Cipher.Name)
}
if err := data.KDF.CheckParams(); err != nil {
return nil, err
}
key, err := data.KDF.Key(passphrase)
if err != nil {
return nil, err
}
return data.Cipher.Decrypt(data.Ciphertext, key)
}
// Unmarshal decrypts the data using passphrase and unmarshals the resulting
// plaintext into the value pointed to by v.
func Unmarshal(data []byte, v interface{}, passphrase []byte) error {
decrypted, err := Decrypt(data, passphrase)
if err != nil {
return err
}
return json.Unmarshal(decrypted, v)
}
func fillRandom(b []byte) error {
_, err := io.ReadFull(rand.Reader, b)
return err
}

View File

@ -61,7 +61,6 @@ type renderFrame struct {
shutdown int
rmOnComplete bool
noPop bool
done bool
err error
}
@ -82,10 +81,10 @@ func newBar(ctx context.Context, container *Progress, bs *bState) *Bar {
return bar
}
// ProxyReader wraps io.Reader with metrics required for progress tracking.
// If `r` is 'unknown total/size' reader it's mandatory to call
// (*Bar).SetTotal(-1, true) method after (io.Reader).Read returns io.EOF.
// If bar is already completed or aborted, returns nil.
// ProxyReader wraps io.Reader with metrics required for progress
// tracking. If `r` is an 'unknown total/size' reader, it's mandatory
// to call `(*Bar).SetTotal(-1, true)` after the wrapper returns
// `io.EOF`. If the bar is already completed or aborted, returns nil.
// Panics if `r` is nil.
func (b *Bar) ProxyReader(r io.Reader) io.ReadCloser {
if r == nil {
@ -177,11 +176,10 @@ func (b *Bar) TraverseDecorators(cb func(decor.Decorator)) {
}
}
// EnableTriggerComplete enables triggering complete event. It's
// effective only for bars which were constructed with `total <= 0` and
// after total has been set with (*Bar).SetTotal(int64, false). If bar
// has been incremented to the total, complete event is triggered right
// away.
// EnableTriggerComplete enables triggering complete event. It's effective
// only for bars which were constructed with `total <= 0` and after total
// has been set with `(*Bar).SetTotal(int64, false)`. If `current >= total`
// at the moment of call, complete event is triggered right away.
func (b *Bar) EnableTriggerComplete() {
select {
case b.operateState <- func(s *bState) {
@ -200,11 +198,11 @@ func (b *Bar) EnableTriggerComplete() {
}
}
// SetTotal sets total to an arbitrary value. It's effective only for
// bar which was constructed with `total <= 0`. Setting total to negative
// value is equivalent to (*Bar).SetTotal((*Bar).Current(), bool) but faster.
// If triggerCompletion is true, total value is set to current and
// complete event is triggered right away.
// SetTotal sets total to an arbitrary value. It's effective only for bar
// which was constructed with `total <= 0`. Setting total to negative value
// is equivalent to `(*Bar).SetTotal((*Bar).Current(), bool)` but faster. If
// triggerCompletion is true, total value is set to current and complete
// event is triggered right away.
func (b *Bar) SetTotal(total int64, triggerCompletion bool) {
select {
case b.operateState <- func(s *bState) {
@ -344,7 +342,7 @@ func (b *Bar) SetPriority(priority int) {
// Abort interrupts bar's running goroutine. Abort won't be engaged
// if bar is already in complete state. If drop is true bar will be
// removed as well. To make sure that bar has been removed call
// (*Bar).Wait method.
// `(*Bar).Wait()` method.
func (b *Bar) Abort(drop bool) {
select {
case b.operateState <- func(s *bState) {
@ -415,7 +413,6 @@ func (b *Bar) serve(ctx context.Context, bs *bState) {
}
func (b *Bar) render(tw int) {
var done bool
fn := func(s *bState) {
var rows []io.Reader
stat := newStatistics(tw, s)
@ -437,7 +434,6 @@ func (b *Bar) render(tw int) {
shutdown: s.shutdown,
rmOnComplete: s.rmOnComplete,
noPop: s.noPop,
done: done,
}
if s.completed || s.aborted {
// post increment makes sure OnComplete decorators are rendered
@ -448,7 +444,6 @@ func (b *Bar) render(tw int) {
select {
case b.operateState <- fn:
case <-b.done:
done = true
fn(b.bs)
}
}

View File

@ -93,9 +93,9 @@ func WithAutoRefresh() ContainerOption {
}
}
// PopCompletedMode will pop completed bars to the top.
// To stop rendering bar after it has been popped, use
// mpb.BarRemoveOnComplete() option on that bar.
// PopCompletedMode pops completed bars out of the progress container.
// In this mode completed bars get moved to the top and stop
// participating in the rendering cycle.
func PopCompletedMode() ContainerOption {
return func(s *pState) {
s.popCompleted = true

View File

@ -18,8 +18,8 @@ const (
defaultRefreshRate = 150 * time.Millisecond
)
// DoneError represents an error when `*mpb.Progress` is done but its functionality is requested.
var DoneError = fmt.Errorf("%T instance can't be reused after it's done", (*Progress)(nil))
// DoneError represents use after `(*Progress).Wait()` error.
var DoneError = fmt.Errorf("%T instance can't be reused after %[1]T.Wait()", (*Progress)(nil))
// Progress represents a container that renders one or more progress bars.
type Progress struct {
@ -55,13 +55,13 @@ type pState struct {
}
// New creates new Progress container instance. It's not possible to
// reuse instance after (*Progress).Wait method has been called.
// reuse instance after `(*Progress).Wait` method has been called.
func New(options ...ContainerOption) *Progress {
return NewWithContext(context.Background(), options...)
}
// NewWithContext creates new Progress container instance with provided
// context. It's not possible to reuse instance after (*Progress).Wait
// context. It's not possible to reuse instance after `(*Progress).Wait`
// method has been called.
func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress {
if ctx == nil {
@ -133,8 +133,7 @@ func (p *Progress) New(total int64, builder BarFillerBuilder, options ...BarOpti
// MustAdd creates a bar which renders itself by provided BarFiller.
// If `total <= 0` triggering complete event by increment methods is
// disabled. Panics if *Progress instance is done, i.e. called after
// (*Progress).Wait().
// disabled. Panics if called after `(*Progress).Wait()`.
func (p *Progress) MustAdd(total int64, filler BarFiller, options ...BarOption) *Bar {
bar, err := p.Add(total, filler, options...)
if err != nil {
@ -145,8 +144,8 @@ func (p *Progress) MustAdd(total int64, filler BarFiller, options ...BarOption)
// Add creates a bar which renders itself by provided BarFiller.
// If `total <= 0` triggering complete event by increment methods
// is disabled. If *Progress instance is done, i.e. called after
// (*Progress).Wait(), return error == DoneError.
// is disabled. If called after `(*Progress).Wait()` then
// `(nil, DoneError)` is returned.
func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) (*Bar, error) {
if filler == nil {
filler = NopStyle().Build()
@ -203,7 +202,7 @@ func (p *Progress) traverseBars(cb func(b *Bar) bool) {
// UpdateBarPriority updates a bar's priority, either immediately or lazily.
// With the lazy flag, order is updated after the next refresh cycle.
// If you don't care about laziness just use *Bar.SetPriority(int).
// If you don't care about laziness just use `(*Bar).SetPriority(int)`.
func (p *Progress) UpdateBarPriority(b *Bar, priority int, lazy bool) {
if b == nil {
return
@ -215,9 +214,9 @@ func (p *Progress) UpdateBarPriority(b *Bar, priority int, lazy bool) {
}
// Write is implementation of io.Writer.
// Writing to `*mpb.Progress` will print lines above a running bar.
// Writing to `*Progress` will print lines above a running bar.
// Writes aren't flushed immediately, but at next refresh cycle.
// If Write is called after `*mpb.Progress` is done, `mpb.DoneError`
// If called after `(*Progress).Wait()` then `(0, DoneError)`
// is returned.
func (p *Progress) Write(b []byte) (int, error) {
type result struct {
@ -238,7 +237,7 @@ func (p *Progress) Write(b []byte) (int, error) {
}
// Wait waits for all bars to complete and finally shutdowns container. After
// this method has been called, there is no way to reuse (*Progress) instance.
// this method has been called, there is no way to reuse `*Progress` instance.
func (p *Progress) Wait() {
// wait for user wg, if any
if p.uwg != nil {
@ -249,9 +248,9 @@ func (p *Progress) Wait() {
p.Shutdown()
}
// Shutdown cancels any running bar immediately and then shutdowns (*Progress)
// Shutdown cancels any running bar immediately and then shutdowns `*Progress`
// instance. Normally this method shouldn't be called unless you know what you
// are doing. Proper way to shutdown is to call (*Progress).Wait() instead.
// are doing. Proper way to shutdown is to call `(*Progress).Wait()` instead.
func (p *Progress) Shutdown() {
p.cancel()
p.pwg.Wait()
@ -357,7 +356,7 @@ func (s *pState) render(cw *cwriter.Writer) (err error) {
func (s *pState) flush(cw *cwriter.Writer, height int) error {
var wg sync.WaitGroup
defer wg.Wait() // waiting for all s.hm.push to complete
defer wg.Wait() // waiting for all s.push to complete
var popCount int
var rows []io.Reader
@ -381,40 +380,34 @@ func (s *pState) flush(cw *cwriter.Writer, height int) error {
_, _ = io.Copy(io.Discard, row)
}
}
if frame.shutdown != 0 && !frame.done {
switch frame.shutdown {
case 1:
b.cancel()
if qb, ok := s.queueBars[b]; ok {
b.cancel()
delete(s.queueBars, b)
qb.priority = b.priority
wg.Add(1)
go func(b *Bar) {
s.hm.push(b, true)
wg.Done()
}(qb)
continue
go s.push(&wg, qb, true)
} else if s.popCompleted && !frame.noPop {
b.priority = s.popPriority
s.popPriority++
wg.Add(1)
go s.push(&wg, b, false)
} else if !frame.rmOnComplete {
wg.Add(1)
go s.push(&wg, b, false)
}
case 2:
if s.popCompleted && !frame.noPop {
switch frame.shutdown {
case 1:
b.priority = s.popPriority
s.popPriority++
default:
b.cancel()
popCount += usedRows
continue
}
} else if frame.rmOnComplete {
b.cancel()
popCount += usedRows
continue
} else {
b.cancel()
}
fallthrough
default:
wg.Add(1)
go s.push(&wg, b, false)
}
wg.Add(1)
go func(b *Bar) {
s.hm.push(b, false)
wg.Done()
}(b)
}
for i := len(rows) - 1; i >= 0; i-- {
@ -427,6 +420,11 @@ func (s *pState) flush(cw *cwriter.Writer, height int) error {
return cw.Flush(len(rows) - popCount)
}
func (s pState) push(wg *sync.WaitGroup, b *Bar, sync bool) {
s.hm.push(b, sync)
wg.Done()
}
func (s pState) makeBarState(total int64, filler BarFiller, options ...BarOption) *bState {
bs := &bState{
id: s.idCount,

View File

@ -13,6 +13,7 @@ go.work.sum
gen/
/example/dice/dice
/example/fib/fib
/example/fib/traces.txt
/example/jaeger/jaeger

View File

@ -61,28 +61,63 @@ issues:
linters-settings:
depguard:
# Check the list against standard lib.
# Default: false
include-go-root: true
# A list of packages for the list type specified.
# Default: []
packages:
- "crypto/md5"
- "crypto/sha1"
- "crypto/**/pkix"
ignore-file-rules:
- "**/*_test.go"
additional-guards:
# Do not allow testing packages in non-test files.
- list-type: denylist
include-go-root: true
packages:
- testing
- github.com/stretchr/testify
ignore-file-rules:
- "**/*_test.go"
- "**/*test/*.go"
- "**/internal/matchers/*.go"
rules:
non-tests:
files:
- "!$test"
- "!**/*test/*.go"
- "!**/internal/matchers/*.go"
deny:
- pkg: "testing"
- pkg: "github.com/stretchr/testify"
- pkg: "crypto/md5"
- pkg: "crypto/sha1"
- pkg: "crypto/**/pkix"
otlp-internal:
files:
- "!**/exporters/otlp/internal/**/*.go"
deny:
- pkg: "go.opentelemetry.io/otel/exporters/otlp/internal"
desc: Do not use cross-module internal packages.
otlptrace-internal:
files:
- "!**/exporters/otlp/otlptrace/*.go"
- "!**/exporters/otlp/otlptrace/internal/**.go"
deny:
- pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal"
desc: Do not use cross-module internal packages.
otlpmetric-internal:
files:
- "!**/exporters/otlp/otlpmetric/internal/*.go"
- "!**/exporters/otlp/otlpmetric/internal/**/*.go"
deny:
- pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal"
desc: Do not use cross-module internal packages.
otel-internal:
files:
- "**/sdk/*.go"
- "**/sdk/**/*.go"
- "**/exporters/*.go"
- "**/exporters/**/*.go"
- "**/schema/*.go"
- "**/schema/**/*.go"
- "**/metric/*.go"
- "**/metric/**/*.go"
- "**/bridge/*.go"
- "**/bridge/**/*.go"
- "**/example/*.go"
- "**/example/**/*.go"
- "**/trace/*.go"
- "**/trace/**/*.go"
deny:
- pkg: "go.opentelemetry.io/otel/internal$"
desc: Do not use cross-module internal packages.
- pkg: "go.opentelemetry.io/otel/internal/attribute"
desc: Do not use cross-module internal packages.
- pkg: "go.opentelemetry.io/otel/internal/internaltest"
desc: Do not use cross-module internal packages.
- pkg: "go.opentelemetry.io/otel/internal/matchers"
desc: Do not use cross-module internal packages.
godot:
exclude:
# Exclude links.

View File

@ -8,6 +8,164 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased]
## [1.19.0/0.42.0/0.0.7] 2023-09-28
This release contains the first stable release of the OpenTelemetry Go [metric SDK].
Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package.
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
### Added
- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539)
- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507)
### Changed
- Allow '/' characters in metric instrument names. (#4501)
- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507)
- Upgrade `gopkg.io/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535)
### Fixed
- In `go.opentelemetry.op/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
### Removed
- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566)
## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14
This is a release candidate for the v1.19.0/v0.42.0 release.
That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK.
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
### Changed
- Allow '/' characters in metric instrument names. (#4501)
### Fixed
- In `go.opentelemetry.op/otel/exporters/prometheus`, don't try to create the prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
## [1.18.0/0.41.0/0.0.6] 2023-09-12
This release drops the compatibility guarantee of [Go 1.19].
### Added
- Add `WithProducer` option in `go.opentelemetry.op/otel/exporters/prometheus` to restore the ability to register producers on the prometheus exporter's manual reader. (#4473)
- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447)
### Changed
- Use a `TestingT` interface instead of `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483)
### Deprecated
- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541).
The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470)
### Removed
- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467)
- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467)
- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468)
- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469)
- Dropped guaranteed support for versions of Go less than 1.20. (#4481)
## [1.17.0/0.40.0/0.0.5] 2023-08-28
### Added
- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
- Add support for exponential histogram aggregations.
A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".ExponentialHistogram` as the aggregation. (#4245)
- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272)
- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272)
- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287)
- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306)
- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315)
- The `go.opentelemetry.io/otel/semconv/v1.21.0` package.
The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362)
- Accept 201 to 299 HTTP status as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365)
- Document the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter"` need to be concurrent safe. (#4381)
- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.op/otel/exporters/prometheus` (#4374)
- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435)
- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437)
- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters. (#4444)
- Support Go 1.21. (#4463)
### Changed
- Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145)
- Log duplicate instrument conflict at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202)
- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210)
- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244)
- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244)
- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221)
- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290)
- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289)
- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332)
- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments it created. (#4333)
- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user provided context does not contain a deadline. (#4356, #4377)
- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408)
- Increase instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434)
- Add `go.opentelemetry.op/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader`. (#4346)
### Removed
- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/metric`.
Use the added `WithProducer` option instead. (#4346)
- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/metric`.
Notice that `PeriodicReader.ForceFlush` is still available. (#4375)
### Fixed
- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143)
- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307)
- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317)
- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337)
- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338)
- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350)
- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350)
- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349)
- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
- Improve context cancellation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369)
- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846)
- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846)
- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846)
- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395)
- Do not append `_total` if the counter already has that suffix for the Prometheus exproter in `go.opentelemetry.io/otel/exporter/prometheus`. (#4373)
- Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409)
- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428)
### Deprecated
- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated.
OpenTelemetry dropped support for Jaeger exporter in July 2023.
Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`
or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423)
- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423)
- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420)
- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420)
- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420)
- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420)
- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421)
- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421)
- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421)
- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425)
- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425)
- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425)
- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425)
- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425)
- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated.
Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435)
## [1.16.0/0.39.0] 2023-05-18
This release contains the first stable release of the OpenTelemetry Go [metric API].
@ -20,10 +178,14 @@ See our [versioning policy](VERSIONING.md) for more information about these stab
The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848)
- The `go.opentelemetry.io/otel/semconv/v1.20.0` package.
The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078)
- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165)
- OTLP metrics exporter now supports the Exponential Histogram Data Type. (#4222)
- Fix serialization of `time.Time` zero values in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271)
### Changed
- Use `strings.Cut()` instead of `string.SplitN()` for better readability and memory use. (#4049)
- `MeterProvider` returns noop meters once it has been shutdown. (#4154)
### Removed
@ -188,6 +350,8 @@ This release drops the compatibility guarantee of [Go 1.18].
- Handle empty environment variable as it they were not set. (#3764)
- Clarify the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823)
- Fix race conditions in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic. (#3899)
- Fix sending nil `scopeInfo` to metrics channel in `go.opentelemetry.io/otel/exporters/metric/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899)
### Deprecated
@ -2492,7 +2656,11 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.16.0...HEAD
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.19.0...HEAD
[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1
[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0
[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0
[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0
[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1
[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1
@ -2563,5 +2731,7 @@ It contains api and sdk for trace and meter.
[Go 1.20]: https://go.dev/doc/go1.20
[Go 1.19]: https://go.dev/doc/go1.19
[Go 1.18]: https://go.dev/doc/go1.18
[Go 1.19]: https://go.dev/doc/go1.19
[metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric
[metric SDK]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric

View File

@ -14,4 +14,4 @@
* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod
CODEOWNERS @MrAlias @MadVikingGod @pellared

View File

@ -179,23 +179,23 @@ For a deeper discussion, see
## Documentation
Each non-example Go Module should have its own `README.md` containing:
Each (non-internal, non-test) package must be documented using
[Go Doc Comments](https://go.dev/doc/comment),
preferably in a `doc.go` file.
- A pkg.go.dev badge which can be generated [here](https://pkg.go.dev/badge/).
- Brief description.
- Installation instructions (and requirements if applicable).
- Hyperlink to an example. Depending on the component the example can be:
- An `example_test.go` like [here](exporters/stdout/stdouttrace/example_test.go).
- A sample Go application with its own `README.md`, like [here](example/zipkin).
- Additional documentation sections such as:
- Configuration,
- Contributing,
- References.
Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples)
instead of putting code snippets in Go doc comments.
In some cases, you can even create [Testable Examples](https://go.dev/blog/examples).
[Here](exporters/jaeger/README.md) is an example of a concise `README.md`.
You can install and run a "local Go Doc site" in the following way:
Moreover, it should be possible to navigate to any `README.md` from the
root `README.md`.
```sh
go install golang.org/x/pkgsite/cmd/pkgsite@latest
pkgsite
```
[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
is an example of a very well-documented package.
## Style Guide
@ -475,8 +475,33 @@ documentation are allowed to be extended with additional methods.
> Warning: methods may be added to this interface in minor releases.
These interfaces are defined by the OpenTelemetry specification and will be
updated as the specification evolves.
Otherwise, stable interfaces MUST NOT be modified.
#### How to Change Specification Interfaces
When an API change must be made, we will update the SDK with the new method one
release before the API change. This will allow the SDK one version before the
API change to work seamlessly with the new API.
If an incompatible version of the SDK is used with the new API the application
will fail to compile.
#### How Not to Change Specification Interfaces
We have explored using a v2 of the API to change interfaces and found that there
was no way to introduce a v2 and have it work seamlessly with the v1 of the API.
Problems arose when a library upgraded to v2 but the application did not: that
library would then produce no telemetry.
More detail of the approaches considered and their limitations can be found in
the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920)
issue.
#### How to Change Other Interfaces
If new functionality is needed for an interface that cannot be changed it MUST
be added by including an additional interface. That added interface can be a
simple interface for the specific functionality that you want to add or it can
@ -531,6 +556,37 @@ functionality should be added, each one will need their own super-set
interfaces and will duplicate the pattern. For this reason, the simple targeted
interface that defines the specific functionality should be preferred.
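A minimal sketch of that extension-interface pattern, with all names hypothetical: the stable interface stays untouched, the new capability lives in a small additional interface, and callers type-assert for it.

```go
package example

// Exporter is a hypothetical stable interface that must not change.
type Exporter interface {
	Export(data []byte) error
}

// FlushingExporter is the small, targeted additional interface that adds
// the new capability without modifying Exporter.
type FlushingExporter interface {
	Exporter
	Flush() error
}

// flushIfSupported uses a type assertion to upgrade to the richer
// interface when the concrete implementation provides it.
func flushIfSupported(e Exporter) error {
	if f, ok := e.(FlushingExporter); ok {
		return f.Flush()
	}
	return nil // capability not supported; nothing to do
}
```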
### Testing
The tests should never leak goroutines.
Use the term `ConcurrentSafe` in the test name when it aims to verify the
absence of race conditions.
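For illustration only (the `Counter` type here is hypothetical), a test following that naming convention; run it with `go test -race`, and note that every goroutine is joined before the test returns, so none leak:

```go
package example

import (
	"sync"
	"testing"
)

// Counter is a hypothetical type whose concurrency safety we verify.
type Counter struct {
	mu sync.Mutex
	n  int
}

func (c *Counter) Inc() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.n++
}

// TestCounterConcurrentSafe signals via its name that it verifies the
// absence of race conditions. wg.Wait joins all goroutines, so none leak.
func TestCounterConcurrentSafe(t *testing.T) {
	var c Counter
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.Inc()
		}()
	}
	wg.Wait()
}
```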
### Internal packages
The use of internal packages should be scoped to a single module. A sub-module
should never import from a parent internal package. Doing so couples the two
modules: a user can upgrade the parent without the child, and if the internal
package API has changed, the upgrade will fail[^3].
There are two known exceptions to this rule:
- `go.opentelemetry.io/otel/internal/global`
- This package manages global state for all of opentelemetry-go. It needs to
be a single package in order to ensure the uniqueness of the global state.
- `go.opentelemetry.io/otel/internal/baggage`
- This package provides values in a `context.Context` that need to be
recognized by `go.opentelemetry.io/otel/baggage` and
`go.opentelemetry.io/otel/bridge/opentracing` but remain private.
If you have duplicate code in multiple modules, make that code into a Go
template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl]
to render the templates in the desired locations. See [#4404] for an example of
this.
[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548
## Approvers and Maintainers
### Approvers
@ -538,14 +594,14 @@ interface that defines the specific functionality should be preferred.
- [Evan Torrie](https://github.com/evantorrie), Verizon Media
- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
- [David Ashpole](https://github.com/dashpole), Google
- [Robert Pająk](https://github.com/pellared), Splunk
- [Chester Cheung](https://github.com/hanyuancheung), Tencent
- [Damien Mathieu](https://github.com/dmathieu), Elastic
- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
### Maintainers
- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
- [Robert Pająk](https://github.com/pellared), Splunk
- [Tyler Yahn](https://github.com/MrAlias), Splunk
### Emeritus
@ -560,3 +616,5 @@ repo](https://github.com/open-telemetry/community/blob/main/community-membership
[Approver]: #approvers
[Maintainer]: #maintainers
[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl
[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404

View File

@ -25,7 +25,7 @@ TIMEOUT = 60
.DEFAULT_GOAL := precommit
.PHONY: precommit ci
precommit: generate dependabot-generate license-check vanity-import-fix misspell go-mod-tidy golangci-lint-fix test-default
precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default
ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage
# Tools
@ -71,8 +71,14 @@ $(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto
GOJQ = $(TOOLS)/gojq
$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq
GOTMPL = $(TOOLS)/gotmpl
$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl
GORELEASE = $(TOOLS)/gorelease
$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease
.PHONY: tools
tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT)
tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
# Virtualized python tools via docker
@ -110,13 +116,24 @@ $(CODESPELL): PACKAGE=codespell
# Generate
.PHONY: generate
generate: go-generate vanity-import-fix
generate: $(OTEL_GO_MOD_DIRS:%=generate/%)
generate/%: DIR=$*
generate/%: | $(STRINGER) $(PORTO)
.PHONY: go-generate
go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%)
go-generate/%: DIR=$*
go-generate/%: | $(STRINGER) $(GOTMPL)
@echo "$(GO) generate $(DIR)/..." \
&& cd $(DIR) \
&& PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && $(PORTO) -w .
&& PATH="$(TOOLS):$${PATH}" $(GO) generate ./...
.PHONY: vanity-import-fix
vanity-import-fix: | $(PORTO)
@$(PORTO) --include-internal -w .
# Generate go.work file for local development.
.PHONY: go-work
go-work: | $(CROSSLINK)
$(CROSSLINK) work --root=$(shell pwd)
# Build
@ -193,7 +210,7 @@ go-mod-tidy/%: DIR=$*
go-mod-tidy/%: | crosslink
@echo "$(GO) mod tidy in $(DIR)" \
&& cd $(DIR) \
&& $(GO) mod tidy -compat=1.19
&& $(GO) mod tidy -compat=1.20
.PHONY: lint-modules
lint-modules: go-mod-tidy
@ -203,11 +220,7 @@ lint: misspell lint-modules golangci-lint
.PHONY: vanity-import-check
vanity-import-check: | $(PORTO)
@$(PORTO) --include-internal -l . || echo "(run: make vanity-import-fix)"
.PHONY: vanity-import-fix
vanity-import-fix: | $(PORTO)
@$(PORTO) --include-internal -w .
@$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 )
.PHONY: misspell
misspell: | $(MISSPELL)
@ -220,7 +233,7 @@ codespell: | $(CODESPELL)
.PHONY: license-check
license-check:
@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \
awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \
awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \
done); \
if [ -n "$${licRes}" ]; then \
echo "license header checking failed:"; echo "$${licRes}"; \
@ -230,7 +243,7 @@ license-check:
DEPENDABOT_CONFIG = .github/dependabot.yml
.PHONY: dependabot-check
dependabot-check: | $(DBOTCONF)
@$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || echo "(run: make dependabot-generate)"
@$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 )
.PHONY: dependabot-generate
dependabot-generate: | $(DBOTCONF)
@ -249,14 +262,23 @@ check-clean-work-tree:
SEMCONVPKG ?= "semconv/"
.PHONY: semconv-generate
semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT)
[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry specification tag"; exit 1 )
[ "$(OTEL_SPEC_REPO)" ] || ( echo "OTEL_SPEC_REPO unset: missing path to opentelemetry specification repo"; exit 1 )
$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SPEC_REPO)/semantic_conventions/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 )
[ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 )
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
.PHONY: gorelease
gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%)
gorelease/%: DIR=$*
gorelease/%:| $(GORELEASE)
@echo "gorelease in $(DIR):" \
&& cd $(DIR) \
&& $(GORELEASE) \
|| echo ""
.PHONY: prerelease
prerelease: | $(MULTIMOD)
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )

View File

@ -11,22 +11,25 @@ It provides a set of APIs to directly measure performance and behavior of your s
## Project Status
| Signal | Status | Project |
| ------- | ---------- | ------- |
| Traces | Stable | N/A |
| Metrics | Beta | N/A |
| Logs | Frozen [1] | N/A |
| Signal | Status | Project |
|---------|------------|-----------------------|
| Traces | Stable | N/A |
| Metrics | Mixed [1] | [Go: Metric SDK (GA)] |
| Logs | Frozen [2] | N/A |
- [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics.
[Go: Metric SDK (GA)]: https://github.com/orgs/open-telemetry/projects/34
- [1]: [Metrics API](https://pkg.go.dev/go.opentelemetry.io/otel/metric) is Stable. [Metrics SDK](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric) is Beta.
- [2]: The Logs signal development is halted for this project while we stabilize the Metrics SDK.
No Logs Pull Requests are currently being accepted.
Progress and status specific to this repository is tracked in our local
Progress and status specific to this repository is tracked in our
[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
and
[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones).
Project versioning information and stability guarantees can be found in the
[versioning documentation](./VERSIONING.md).
[versioning documentation](VERSIONING.md).
### Compatibility
@ -49,17 +52,17 @@ stop ensuring compatibility with these versions in the following manner:
Currently, this project supports the following environments.
| OS | Go Version | Architecture |
| ------- | ---------- | ------------ |
|---------|------------|--------------|
| Ubuntu | 1.21 | amd64 |
| Ubuntu | 1.20 | amd64 |
| Ubuntu | 1.19 | amd64 |
| Ubuntu | 1.21 | 386 |
| Ubuntu | 1.20 | 386 |
| Ubuntu | 1.19 | 386 |
| MacOS | 1.21 | amd64 |
| MacOS | 1.20 | amd64 |
| MacOS | 1.19 | amd64 |
| Windows | 1.21 | amd64 |
| Windows | 1.20 | amd64 |
| Windows | 1.19 | amd64 |
| Windows | 1.21 | 386 |
| Windows | 1.20 | 386 |
| Windows | 1.19 | 386 |
While this project should work for other systems, no compatibility guarantees
are made for those systems currently.
@ -97,12 +100,11 @@ export pipeline to send that telemetry to an observability platform.
All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
| Exporter | Metrics | Traces |
| :-----------------------------------: | :-----: | :----: |
| [Jaeger](./exporters/jaeger/) | | ✓ |
| [OTLP](./exporters/otlp/) | ✓ | ✓ |
| [Prometheus](./exporters/prometheus/) | ✓ | |
| [stdout](./exporters/stdout/) | ✓ | ✓ |
| [Zipkin](./exporters/zipkin/) | | ✓ |
|---------------------------------------|:-------:|:------:|
| [OTLP](./exporters/otlp/) | ✓ | ✓ |
| [Prometheus](./exporters/prometheus/) | ✓ | |
| [stdout](./exporters/stdout/) | ✓ | ✓ |
| [Zipkin](./exporters/zipkin/) | | ✓ |
## Contributing

View File

@ -2,27 +2,30 @@
## Semantic Convention Generation
New versions of the [OpenTelemetry Specification] mean new versions of the `semconv` package need to be generated.
New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
The `semconv-generate` make target is used for this.
1. Checkout a local copy of the [OpenTelemetry Specification] to the desired release tag.
1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag.
2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest`
3. Run the `make semconv-generate ...` target from this repository.
For example,
```sh
export TAG="v1.13.0" # Change to the release version you are generating.
export OTEL_SPEC_REPO="/absolute/path/to/opentelemetry-specification"
export TAG="v1.21.0" # Change to the release version you are generating.
export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions"
docker pull otel/semconvgen:latest
make semconv-generate # Uses the exported TAG and OTEL_SPEC_REPO.
make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO.
```
This should create a new sub-package of [`semconv`](./semconv).
Ensure things look correct before submitting a pull request to include the addition.
**Note**, the generation code was changed to generate versions >= 1.13.
To generate versions prior to this, checkout the old release of this repository (i.e. [2fe8861](https://github.com/open-telemetry/opentelemetry-go/commit/2fe8861a24e20088c065b116089862caf9e3cd8b)).
## Breaking changes validation
You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API.
You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
## Pre-Release
@ -120,7 +123,17 @@ Once verified be sure to [make a release for the `contrib` repository](https://g
### Website Documentation
Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/).
Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/instrumentation/go].
Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
[OpenTelemetry Specification]: https://github.com/open-telemetry/opentelemetry-specification
[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
[Go instrumentation documentation]: https://opentelemetry.io/docs/instrumentation/go/
[content/en/docs/instrumentation/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/instrumentation/go
### Demo Repository
Bump the dependencies in the following Go services:
- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice)
- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice)
- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice)

60
vendor/go.opentelemetry.io/otel/attribute/filter.go generated vendored Normal file
View File

@ -0,0 +1,60 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute // import "go.opentelemetry.io/otel/attribute"
// Filter supports removing certain attributes from attribute sets. When
// the filter returns true, the attribute will be kept in the filtered
// attribute set. When the filter returns false, the attribute is excluded
// from the filtered attribute set, and the attribute instead appears in
// the removed list of excluded attributes.
type Filter func(KeyValue) bool
// NewAllowKeysFilter returns a Filter that only allows attributes with one of
// the provided keys.
//
// If keys is empty a deny-all filter is returned.
func NewAllowKeysFilter(keys ...Key) Filter {
if len(keys) <= 0 {
return func(kv KeyValue) bool { return false }
}
allowed := make(map[Key]struct{})
for _, k := range keys {
allowed[k] = struct{}{}
}
return func(kv KeyValue) bool {
_, ok := allowed[kv.Key]
return ok
}
}
// NewDenyKeysFilter returns a Filter that only allows attributes
// that do not have one of the provided keys.
//
// If keys is empty an allow-all filter is returned.
func NewDenyKeysFilter(keys ...Key) Filter {
if len(keys) <= 0 {
return func(kv KeyValue) bool { return true }
}
forbid := make(map[Key]struct{})
for _, k := range keys {
forbid[k] = struct{}{}
}
return func(kv KeyValue) bool {
_, ok := forbid[kv.Key]
return !ok
}
}
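For context, a minimal usage sketch of the new constructors together with the existing `Set.Filter` method (the attribute keys and values are illustrative):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Keep only "service.name"; all other keys are filtered out.
	allow := attribute.NewAllowKeysFilter("service.name")

	set := attribute.NewSet(
		attribute.String("service.name", "checkout"),
		attribute.String("debug.id", "42"),
	)

	kept, removed := set.Filter(allow)
	fmt.Println(kept.Len(), len(removed)) // prints: 1 1
}
```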

View File

@ -39,13 +39,6 @@ type (
iface interface{}
}
// Filter supports removing certain attributes from attribute sets. When
// the filter returns true, the attribute will be kept in the filtered
// attribute set. When the filter returns false, the attribute is excluded
// from the filtered attribute set, and the attribute instead appears in
// the removed list of excluded attributes.
Filter func(KeyValue) bool
// Sortable implements sort.Interface, used for sorting KeyValue. This is
// an exported type to support a memory optimization. A pointer to one of
// these is needed for the call to sort.Stable(), which the caller may

View File

@ -61,11 +61,6 @@ type Property struct {
// hasValue indicates if a zero-value value means the property does not
// have a value or if it was the zero-value.
hasValue bool
// hasData indicates whether the created property contains data or not.
// Properties that do not contain data are invalid with no other check
// required.
hasData bool
}
// NewKeyProperty returns a new Property for key.
@ -76,7 +71,7 @@ func NewKeyProperty(key string) (Property, error) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
p := Property{key: key, hasData: true}
p := Property{key: key}
return p, nil
}
@ -95,7 +90,6 @@ func NewKeyValueProperty(key, value string) (Property, error) {
key: key,
value: value,
hasValue: true,
hasData: true,
}
return p, nil
}
@ -117,7 +111,7 @@ func parseProperty(property string) (Property, error) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
}
p := Property{hasData: true}
var p Property
if match[1] != "" {
p.key = match[1]
} else {
@ -136,10 +130,6 @@ func (p Property) validate() error {
return fmt.Errorf("invalid property: %w", err)
}
if !p.hasData {
return errFunc(fmt.Errorf("%w: %q", errInvalidProperty, p))
}
if !keyRe.MatchString(p.key) {
return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
}

29
vendor/go.opentelemetry.io/otel/internal/gen.go generated vendored Normal file
View File

@ -0,0 +1,29 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal // import "go.opentelemetry.io/otel/internal"
//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go
//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go
//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go
//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go
//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go
//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go
//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go
//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go
//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go
//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go
//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go
//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go

View File

@ -18,7 +18,6 @@ import (
"log"
"os"
"sync/atomic"
"unsafe"
)
var (
@ -42,7 +41,7 @@ type ErrorHandler interface {
}
type ErrDelegator struct {
delegate unsafe.Pointer
delegate atomic.Pointer[ErrorHandler]
}
func (d *ErrDelegator) Handle(err error) {
@ -50,12 +49,12 @@ func (d *ErrDelegator) Handle(err error) {
}
func (d *ErrDelegator) getDelegate() ErrorHandler {
return *(*ErrorHandler)(atomic.LoadPointer(&d.delegate))
return *d.delegate.Load()
}
// setDelegate sets the ErrorHandler delegate.
func (d *ErrDelegator) setDelegate(eh ErrorHandler) {
atomic.StorePointer(&d.delegate, unsafe.Pointer(&eh))
d.delegate.Store(&eh)
}
func defaultErrorHandler() *ErrDelegator {
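The hunk above swaps hand-rolled `unsafe.Pointer` atomics for the type-safe `atomic.Pointer[T]` generic introduced in Go 1.19; the same change is applied to the global logger below. A minimal sketch of the pattern, with a hypothetical `Handler` type:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// Handler is a stand-in for the delegate type being stored.
type Handler interface{ Handle(error) }

type logHandler struct{}

func (logHandler) Handle(err error) { fmt.Println("handled:", err) }

func main() {
	// atomic.Pointer[T] replaces the unsafe.Pointer juggling: loads and
	// stores are type-checked, and no casts are required.
	var delegate atomic.Pointer[Handler]

	var h Handler = logHandler{}
	delegate.Store(&h) // store a pointer to the interface value

	if p := delegate.Load(); p != nil {
		(*p).Handle(fmt.Errorf("boom"))
	}
}
```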

View File

@ -18,7 +18,6 @@ import (
"log"
"os"
"sync/atomic"
"unsafe"
"github.com/go-logr/logr"
"github.com/go-logr/stdr"
@ -28,7 +27,7 @@ import (
//
// The default logger uses stdr which is backed by the standard `log.Logger`
// interface. This logger will only show messages at the Error Level.
var globalLogger unsafe.Pointer
var globalLogger atomic.Pointer[logr.Logger]
func init() {
SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
@ -40,11 +39,11 @@ func init() {
// To see Info messages use a logger with `l.V(4).Enabled() == true`
// To see Debug messages use a logger with `l.V(8).Enabled() == true`.
func SetLogger(l logr.Logger) {
atomic.StorePointer(&globalLogger, unsafe.Pointer(&l))
globalLogger.Store(&l)
}
func getLogger() logr.Logger {
return *(*logr.Logger)(atomic.LoadPointer(&globalLogger))
return *globalLogger.Load()
}
// Info prints messages about the general state of the API or SDK.

View File

@ -167,6 +167,8 @@ func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64Ob
}
// WithUnit sets the instrument unit.
//
// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
func WithUnit(u string) InstrumentOption { return unitOpt(u) }
// AddOption applies options to an addition measurement. See

View File

@ -157,6 +157,8 @@ type Meter interface {
//
// If no instruments are passed, f should not be registered nor called
// during collection.
//
// The function f needs to be concurrent safe.
RegisterCallback(f Callback, instruments ...Observable) (Registration, error)
}
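Because collection may run from multiple goroutines, a registered callback must not touch shared state without synchronization. A minimal sketch (names illustrative):

```go
package main

import (
	"context"
	"sync/atomic"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	meter := otel.Meter("example")

	// Shared state is read atomically so the callback is concurrent safe.
	var queueDepth atomic.Int64

	gauge, err := meter.Int64ObservableGauge("queue.depth")
	if err != nil {
		panic(err)
	}

	// The callback is invoked only during collection, and only because
	// gauge is passed in the instruments list.
	_, err = meter.RegisterCallback(
		func(_ context.Context, o metric.Observer) error {
			o.ObserveInt64(gauge, queueDepth.Load())
			return nil
		},
		gauge,
	)
	if err != nil {
		panic(err)
	}
}
```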

View File

@ -1 +1 @@
codespell==2.2.4
codespell==2.2.5

View File

@ -16,5 +16,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
return "1.16.0"
return "1.19.0"
}

View File

@ -14,19 +14,17 @@
module-sets:
stable-v1:
version: v1.16.0
version: v1.19.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opentracing
- go.opentelemetry.io/otel/bridge/opentracing/test
- go.opentelemetry.io/otel/example/dice
- go.opentelemetry.io/otel/example/fib
- go.opentelemetry.io/otel/example/jaeger
- go.opentelemetry.io/otel/example/namedtracer
- go.opentelemetry.io/otel/example/otel-collector
- go.opentelemetry.io/otel/example/passthrough
- go.opentelemetry.io/otel/example/zipkin
- go.opentelemetry.io/otel/exporters/jaeger
- go.opentelemetry.io/otel/exporters/otlp/internal/retry
- go.opentelemetry.io/otel/exporters/otlp/otlptrace
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
@ -34,23 +32,23 @@ module-sets:
- go.opentelemetry.io/otel/exporters/zipkin
- go.opentelemetry.io/otel/metric
- go.opentelemetry.io/otel/sdk
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
version: v0.39.0
version: v0.42.0
modules:
- go.opentelemetry.io/otel/bridge/opencensus
- go.opentelemetry.io/otel/bridge/opencensus/test
- go.opentelemetry.io/otel/example/opencensus
- go.opentelemetry.io/otel/example/prometheus
- go.opentelemetry.io/otel/example/view
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
- go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
- go.opentelemetry.io/otel/exporters/prometheus
- go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/bridge/opencensus
- go.opentelemetry.io/otel/bridge/opencensus/test
- go.opentelemetry.io/otel/example/view
experimental-schema:
version: v0.0.4
version: v0.0.7
modules:
- go.opentelemetry.io/otel/schema
excluded-modules:

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !purego
// +build !purego
// Package alias implements memory aliasing tests.
package alias
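The long run of small hunks that follows makes one mechanical change per file: the legacy `// +build` comment line is dropped, since the `//go:build` form has been canonical since Go 1.17. Illustratively, this file's constraint now needs only:

```go
// On Go 1.17+ a single go:build line suffices; the paired
// "// +build !purego" comment line has been removed.

//go:build !purego

package alias
```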

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build purego
// +build purego
// Package alias implements memory aliasing tests.
package alias

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !go1.13
// +build !go1.13
package poly1305

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build go1.13
// +build go1.13
package poly1305

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego
// +build !amd64,!ppc64le,!s390x !gc purego
package poly1305

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build gc && !purego
// +build gc,!purego
package poly1305

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build gc && !purego
// +build gc,!purego
#include "textflag.h"

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build gc && !purego
// +build gc,!purego
package poly1305

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build gc && !purego
// +build gc,!purego
#include "textflag.h"

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build gc && !purego
// +build gc,!purego
package poly1305

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build gc && !purego
// +build gc,!purego
#include "textflag.h"

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build amd64 && !purego && gc
// +build amd64,!purego,gc
package salsa

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build amd64 && !purego && gc
// +build amd64,!purego,gc
// This code was translated into a form compatible with 6a from the public
// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !amd64 || purego || !gc
// +build !amd64 purego !gc
package salsa

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !gc || purego || !s390x
// +build !gc purego !s390x
package sha3

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build !amd64 || purego || !gc
// +build !amd64 purego !gc
package sha3

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build amd64 && !purego && gc
// +build amd64,!purego,gc
package sha3

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build amd64 && !purego && gc
// +build amd64,!purego,gc
// This code was translated into a form compatible with 6a from the public
// domain sources at https://github.com/gvanas/KeccakCodePackage

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build go1.4
// +build go1.4
package sha3

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build gc && !purego
// +build gc,!purego
package sha3

View File

@ -3,7 +3,6 @@
// license that can be found in the LICENSE file.
//go:build gc && !purego
// +build gc,!purego
#include "textflag.h"

Some files were not shown because too many files have changed in this diff.