Mirror of https://github.com/containers/skopeo.git (synced 2025-05-07 23:46:46 +00:00)

fix(deps): update module github.com/containers/image/v5 to v5.34.0

Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Signed-off-by: Miloslav Trmač <mitr@redhat.com>

Parent: 0eb4ad2f54
Commit: b78a415987
Changed files:
- go.mod, go.sum
- vendor/github.com/containerd/typeurl/v2
- vendor/github.com/containers/image/v5: copy, directory, docker (daemon, distribution_error.go, docker_client.go, docker_image_dest.go, docker_image_src.go, internal/tarfile, registries_d.go), internal (imagedestination, manifest, private, reflink), oci, openshift, pkg, signature, storage, version
- vendor/github.com/coreos/go-oidc/v3/oidc
- vendor/github.com/docker (distribution/registry/client/auth/challenge, docker)
- vendor/github.com/proglottis/gpgme
- vendor/github.com/sigstore/rekor (CONTRIBUTORS.md, pkg/client, pkg/generated/client/entries: create_log_entry_responses.go, entries_client.go, get_log_entry_by_index_responses.go, get_log_entry_by_uuid_responses.go, search_log_query_responses.go; pkg/generated/client/index, pkg/generated/client/pubkey, pkg/generated/client/tlog, pkg/generated/models, pkg/util)
- vendor/github.com/sigstore/sigstore/pkg/oauthflow
- vendor/github.com/sylabs/sif/v2/pkg/sif
- vendor/github.com/vbauerster/mpb/v8
- vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
- vendor/go.opentelemetry.io/otel
go.mod: 42 lines changed
@@ -1,14 +1,14 @@
 module github.com/containers/skopeo
 
 // Minimum required golang version
-go 1.22.6
+go 1.22.8
 
 // Warning: Ensure the "go" and "toolchain" versions match exactly to prevent unwanted auto-updates
 
 require (
 	github.com/Masterminds/semver/v3 v3.3.1
 	github.com/containers/common v0.61.1
-	github.com/containers/image/v5 v5.33.1
+	github.com/containers/image/v5 v5.34.0
 	github.com/containers/ocicrypt v1.2.1
 	github.com/containers/storage v1.57.1
 	github.com/docker/distribution v2.8.3+incompatible
@@ -37,14 +37,14 @@ require (
 	github.com/containerd/errdefs v0.3.0 // indirect
 	github.com/containerd/errdefs/pkg v0.3.0 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
-	github.com/containerd/typeurl/v2 v2.2.0 // indirect
+	github.com/containerd/typeurl/v2 v2.2.3 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
-	github.com/coreos/go-oidc/v3 v3.11.0 // indirect
+	github.com/coreos/go-oidc/v3 v3.12.0 // indirect
 	github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
 	github.com/cyphar/filepath-securejoin v0.3.6 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/distribution/reference v0.6.0 // indirect
-	github.com/docker/docker v27.3.1+incompatible // indirect
+	github.com/docker/docker v27.5.1+incompatible // indirect
 	github.com/docker/docker-credential-helpers v0.8.2 // indirect
 	github.com/docker/go-connections v0.5.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
@@ -99,41 +99,41 @@ require (
 	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
 	github.com/pkg/errors v0.9.1 // indirect
 	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
-	github.com/proglottis/gpgme v0.1.3 // indirect
+	github.com/proglottis/gpgme v0.1.4 // indirect
 	github.com/rivo/uniseg v0.4.7 // indirect
 	github.com/russross/blackfriday v2.0.0+incompatible // indirect
-	github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.9.0 // indirect
 	github.com/segmentio/ksuid v1.0.4 // indirect
 	github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
 	github.com/sigstore/fulcio v1.6.4 // indirect
-	github.com/sigstore/rekor v1.3.6 // indirect
-	github.com/sigstore/sigstore v1.8.9 // indirect
+	github.com/sigstore/rekor v1.3.8 // indirect
+	github.com/sigstore/sigstore v1.8.12 // indirect
 	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
 	github.com/smallstep/pkcs7 v0.1.1 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 // indirect
-	github.com/sylabs/sif/v2 v2.19.1 // indirect
+	github.com/sylabs/sif/v2 v2.20.2 // indirect
 	github.com/tchap/go-patricia/v2 v2.3.2 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/ulikunitz/xz v0.5.12 // indirect
 	github.com/vbatts/tar-split v0.11.7 // indirect
-	github.com/vbauerster/mpb/v8 v8.8.3 // indirect
+	github.com/vbauerster/mpb/v8 v8.9.1 // indirect
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
 	go.mongodb.org/mongo-driver v1.14.0 // indirect
 	go.opencensus.io v0.24.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
-	go.opentelemetry.io/otel v1.28.0 // indirect
-	go.opentelemetry.io/otel/metric v1.28.0 // indirect
-	go.opentelemetry.io/otel/trace v1.28.0 // indirect
-	golang.org/x/crypto v0.31.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
+	go.opentelemetry.io/otel v1.31.0 // indirect
+	go.opentelemetry.io/otel/metric v1.31.0 // indirect
+	go.opentelemetry.io/otel/trace v1.31.0 // indirect
+	golang.org/x/crypto v0.32.0 // indirect
 	golang.org/x/mod v0.22.0 // indirect
-	golang.org/x/net v0.33.0 // indirect
-	golang.org/x/oauth2 v0.23.0 // indirect
+	golang.org/x/net v0.34.0 // indirect
+	golang.org/x/oauth2 v0.25.0 // indirect
 	golang.org/x/sync v0.10.0 // indirect
 	golang.org/x/sys v0.29.0 // indirect
 	golang.org/x/text v0.21.0 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect
-	google.golang.org/grpc v1.68.1 // indirect
-	google.golang.org/protobuf v1.35.1 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect
+	google.golang.org/grpc v1.69.4 // indirect
+	google.golang.org/protobuf v1.36.2 // indirect
 )
go.sum: 108 lines changed
@ -39,20 +39,20 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8=
|
||||
github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU=
|
||||
github.com/containerd/typeurl/v2 v2.2.0 h1:6NBDbQzr7I5LHgp34xAXYF5DOTQDn05X58lsPEmzLso=
|
||||
github.com/containerd/typeurl/v2 v2.2.0/go.mod h1:8XOOxnyatxSWuG8OfsZXVnAF4iZfedjS/8UHSPJnX4g=
|
||||
github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40=
|
||||
github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
|
||||
github.com/containers/common v0.61.1 h1:jpk385ZFEx3MAX+sjwOoTZElvpgsGi0YJHuRmrhF/j8=
|
||||
github.com/containers/common v0.61.1/go.mod h1:C+TfkhTV+ADp1Hu+BMIAYPvSFix21swYo9PZuCKoSUM=
|
||||
github.com/containers/image/v5 v5.33.1 h1:nTWKwxAlY0aJrilvvhssqssJVnley6VqxkLiLzTEYIs=
|
||||
github.com/containers/image/v5 v5.33.1/go.mod h1:/FJiLlvVbeBxWNMPVPPIWJxHTAzwBoFvyN0a51zo1CE=
|
||||
github.com/containers/image/v5 v5.34.0 h1:HPqQaDUsox/3mC1pbOyLAIQEp0JhQqiUZ+6JiFIZLDI=
|
||||
github.com/containers/image/v5 v5.34.0/go.mod h1:/WnvUSEfdqC/ahMRd4YJDBLrpYWkGl018rB77iB3FDo=
|
||||
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
|
||||
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
|
||||
github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM=
|
||||
github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ=
|
||||
github.com/containers/storage v1.57.1 h1:hKPoFsuBcB3qTzBxa4IFpZMRzUuL5Xhv/BE44W0XHx8=
|
||||
github.com/containers/storage v1.57.1/go.mod h1:i/Hb4lu7YgFr9G0K6BMjqW0BLJO1sFsnWQwj2UoWCUM=
|
||||
github.com/coreos/go-oidc/v3 v3.11.0 h1:Ia3MxdwpSw702YW0xgfmP1GVCMA9aEFWu12XUZ3/OtI=
|
||||
github.com/coreos/go-oidc/v3 v3.11.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
|
||||
github.com/coreos/go-oidc/v3 v3.12.0 h1:sJk+8G2qq94rDI6ehZ71Bol3oUHy63qNYmkiSjrc/Jo=
|
||||
github.com/coreos/go-oidc/v3 v3.12.0/go.mod h1:gE3LgjOgFoHi9a4ce4/tJczr0Ai2/BoDhf0r5lltWI0=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM=
|
||||
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
|
||||
@ -64,12 +64,12 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ=
|
||||
github.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli v27.5.1+incompatible h1:JB9cieUT9YNiMITtIsguaN55PLOHhBSz3LKVc6cqWaY=
|
||||
github.com/docker/cli v27.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
|
||||
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI=
|
||||
github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8=
|
||||
github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
|
||||
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
@ -260,10 +260,10 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/proglottis/gpgme v0.1.3 h1:Crxx0oz4LKB3QXc5Ea0J19K/3ICfy3ftr5exgUK1AU0=
|
||||
github.com/proglottis/gpgme v0.1.3/go.mod h1:fPbW/EZ0LvwQtH8Hy7eixhp1eF3G39dtx7GUN+0Gmy0=
|
||||
github.com/prometheus/client_golang v1.20.2 h1:5ctymQzZlyOON1666svgwn3s6IKWgfbjsejTMiXIyjg=
|
||||
github.com/prometheus/client_golang v1.20.2/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/proglottis/gpgme v0.1.4 h1:3nE7YNA70o2aLjcg63tXMOhPD7bplfE5CBdV+hLAm2M=
|
||||
github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM=
|
||||
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
|
||||
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
@ -281,8 +281,8 @@ github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06Oy
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sebdah/goldie/v2 v2.5.5 h1:rx1mwF95RxZ3/83sdS4Yp7t2C5TCokvWP4TBRbAyEWY=
|
||||
github.com/sebdah/goldie/v2 v2.5.5/go.mod h1:oZ9fp0+se1eapSRjfYbsV/0Hqhbuu3bJVvKI/NNtssI=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.9.0 h1:rf1HIbL64nUpEIZnjLZ3mcNEL9NBPB0iuVjyxvq3LZc=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.9.0/go.mod h1:DVHKMcZ+V4/woA/peqr+L0joiRXbPpQ042GgJckkFgw=
|
||||
github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
|
||||
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
|
||||
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
|
||||
@ -291,10 +291,10 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5I
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sigstore/fulcio v1.6.4 h1:d86obfxUAG3Y6CYwOx1pdwCZwKmROB6w6927pKOVIRY=
|
||||
github.com/sigstore/fulcio v1.6.4/go.mod h1:Y6bn3i3KGhXpaHsAtYP3Z4Np0+VzCo1fLv8Ci6mbPDs=
|
||||
github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8=
|
||||
github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc=
|
||||
github.com/sigstore/sigstore v1.8.9 h1:NiUZIVWywgYuVTxXmRoTT4O4QAGiTEKup4N1wdxFadk=
|
||||
github.com/sigstore/sigstore v1.8.9/go.mod h1:d9ZAbNDs8JJfxJrYmulaTazU3Pwr8uLL9+mii4BNR3w=
|
||||
github.com/sigstore/rekor v1.3.8 h1:B8kJI8mpSIXova4Jxa6vXdJyysRxFGsEsLKBDl0rRjA=
|
||||
github.com/sigstore/rekor v1.3.8/go.mod h1:/dHFYKSuxEygfDRnEwyJ+ZD6qoVYNXQdi1mJrKvKWsI=
|
||||
github.com/sigstore/sigstore v1.8.12 h1:S8xMVZbE2z9ZBuQUEG737pxdLjnbOIcFi5v9UFfkJFc=
|
||||
github.com/sigstore/sigstore v1.8.12/go.mod h1:+PYQAa8rfw0QdPpBcT+Gl3egKD9c+TUgAlF12H3Nmjo=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
|
||||
@ -317,8 +317,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/sylabs/sif/v2 v2.19.1 h1:1eeMmFc8elqJe60ZiWwXgL3gMheb0IP4GmNZ4q0IEA0=
|
||||
github.com/sylabs/sif/v2 v2.19.1/go.mod h1:U1SUhvl8X1JIxAylC0DYz1fa/Xba6EMZD1dGPGBH83E=
|
||||
github.com/sylabs/sif/v2 v2.20.2 h1:HGEPzauCHhIosw5o6xmT3jczuKEuaFzSfdjAsH33vYw=
|
||||
github.com/sylabs/sif/v2 v2.20.2/go.mod h1:WyYryGRaR4Wp21SAymm5pK0p45qzZCSRiZMFvUZiuhc=
|
||||
github.com/tchap/go-patricia/v2 v2.3.2 h1:xTHFutuitO2zqKAQ5rCROYgUb7Or/+IC3fts9/Yc7nM=
|
||||
github.com/tchap/go-patricia/v2 v2.3.2/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
|
||||
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
|
||||
@ -328,8 +328,8 @@ github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
|
||||
github.com/ulikunitz/xz v0.5.12/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
||||
github.com/vbatts/tar-split v0.11.7 h1:ixZ93pO/GmvaZw4Vq9OwmfZK/kc2zKdPfu0B+gYqs3U=
|
||||
github.com/vbatts/tar-split v0.11.7/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
|
||||
github.com/vbauerster/mpb/v8 v8.8.3 h1:dTOByGoqwaTJYPubhVz3lO5O6MK553XVgUo33LdnNsQ=
|
||||
github.com/vbauerster/mpb/v8 v8.8.3/go.mod h1:JfCCrtcMsJwP6ZwMn9e5LMnNyp3TVNpUWWkN+nd4EWk=
|
||||
github.com/vbauerster/mpb/v8 v8.9.1 h1:LH5R3lXPfE2e3lIGxN7WNWv3Hl5nWO6LRi2B0L0ERHw=
|
||||
github.com/vbauerster/mpb/v8 v8.9.1/go.mod h1:4XMvznPh8nfe2NpnDo1QTPvW9MVkUhbG90mPWvmOzcQ=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
@ -354,20 +354,22 @@ go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd
|
||||
go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
|
||||
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
|
||||
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
|
||||
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
|
||||
go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY=
|
||||
go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
|
||||
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
|
||||
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
|
||||
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
|
||||
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
|
||||
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
|
||||
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0 h1:j9+03ymgYhPKmeXGk5Zu+cIZOlVzd9Zv7QIiyItjFBU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.28.0/go.mod h1:Y5+XiUG4Emn1hTfciPzGPJaSI+RpDts6BnCIir0SLqk=
|
||||
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
|
||||
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
|
||||
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
|
||||
go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
|
||||
go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys=
|
||||
go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A=
|
||||
go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
|
||||
go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
@ -380,8 +382,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
|
||||
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 h1:1UoZQm6f0P/ZO0w1Ri+f+ifG/gXhegadRdwBIXEFWDo=
|
||||
golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
|
||||
@ -413,11 +415,11 @@ golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
|
||||
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
|
||||
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
|
||||
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
|
||||
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
|
||||
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@ -468,8 +470,8 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
|
||||
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
|
||||
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
@ -493,18 +495,18 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20240823204242-4ba0660f739c h1:TYOEhrQMrNDTAd2rX9m+WgGr8Ku6YNuj1D7OX6rWSok=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1 h1:hjSy6tcFQZ171igDaN5QHOw2n6vx40juYbC/x67CEhc=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:qpvKtACPCQhAdu3PyQgV4l3LMXZEtft7y8QcarRsp9I=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
|
||||
google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0=
|
||||
google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw=
|
||||
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
|
||||
google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@ -514,8 +516,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
|
||||
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU=
|
||||
google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
vendor/github.com/containerd/typeurl/v2/README.md (generated, vendored): 6 lines changed
@@ -18,3 +18,9 @@ As a containerd sub-project, you will find the:
  * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
 
 information in our [`containerd/project`](https://github.com/containerd/project) repository.
+
+## Optional
+
+By default, support for gogoproto is available along side the standard Google
+protobuf types.
+You can choose to leave gogo support out by using the `!no_gogo` build tag.
vendor/github.com/containerd/typeurl/v2/types.go (generated, vendored): 89 lines changed
@ -24,7 +24,6 @@ import (
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
gogoproto "github.com/gogo/protobuf/proto"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/reflect/protoregistry"
|
||||
"google.golang.org/protobuf/types/known/anypb"
|
||||
@ -33,8 +32,16 @@ import (
|
||||
var (
|
||||
mu sync.RWMutex
|
||||
registry = make(map[reflect.Type]string)
|
||||
handlers []handler
|
||||
)
|
||||
|
||||
type handler interface {
|
||||
Marshaller(interface{}) func() ([]byte, error)
|
||||
Unmarshaller(interface{}) func([]byte) error
|
||||
TypeURL(interface{}) string
|
||||
GetType(url string) (reflect.Type, bool)
|
||||
}
|
||||
|
||||
// Definitions of common error types used throughout typeurl.
|
||||
//
|
||||
// These error types are used with errors.Wrap and errors.Wrapf to add context
|
||||
@ -112,9 +119,12 @@ func TypeURL(v interface{}) (string, error) {
|
||||
switch t := v.(type) {
|
||||
case proto.Message:
|
||||
return string(t.ProtoReflect().Descriptor().FullName()), nil
|
||||
case gogoproto.Message:
|
||||
return gogoproto.MessageName(t), nil
|
||||
default:
|
||||
for _, h := range handlers {
|
||||
if u := h.TypeURL(v); u != "" {
|
||||
return u, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("type %s: %w", reflect.TypeOf(v), ErrNotFound)
|
||||
}
|
||||
}
|
||||
@ -149,12 +159,19 @@ func MarshalAny(v interface{}) (Any, error) {
|
||||
marshal = func(v interface{}) ([]byte, error) {
|
||||
return proto.Marshal(t)
|
||||
}
|
||||
case gogoproto.Message:
|
||||
marshal = func(v interface{}) ([]byte, error) {
|
||||
return gogoproto.Marshal(t)
|
||||
}
|
||||
default:
|
||||
marshal = json.Marshal
|
||||
for _, h := range handlers {
|
||||
if m := h.Marshaller(v); m != nil {
|
||||
marshal = func(v interface{}) ([]byte, error) {
|
||||
return m()
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if marshal == nil {
|
||||
marshal = json.Marshal
|
||||
}
|
||||
}
|
||||
|
||||
url, err := TypeURL(v)
|
||||
@ -223,13 +240,13 @@ func MarshalAnyToProto(from interface{}) (*anypb.Any, error) {
|
||||
}
|
||||
|
||||
func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) {
|
||||
t, err := getTypeByUrl(typeURL)
|
||||
t, isProto, err := getTypeByUrl(typeURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if v == nil {
|
||||
v = reflect.New(t.t).Interface()
|
||||
v = reflect.New(t).Interface()
|
||||
} else {
|
||||
// Validate interface type provided by client
|
||||
vURL, err := TypeURL(v)
|
||||
@ -241,51 +258,45 @@ func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error)
|
||||
}
|
||||
}
|
||||
|
||||
if t.isProto {
|
||||
switch t := v.(type) {
|
||||
case proto.Message:
|
||||
err = proto.Unmarshal(value, t)
|
||||
case gogoproto.Message:
|
||||
err = gogoproto.Unmarshal(value, t)
|
||||
if isProto {
|
||||
pm, ok := v.(proto.Message)
|
||||
if ok {
|
||||
return v, proto.Unmarshal(value, pm)
|
||||
}
|
||||
|
||||
for _, h := range handlers {
|
||||
if unmarshal := h.Unmarshaller(v); unmarshal != nil {
|
||||
return v, unmarshal(value)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
err = json.Unmarshal(value, v)
|
||||
}
|
||||
|
||||
return v, err
|
||||
// fallback to json unmarshaller
|
||||
return v, json.Unmarshal(value, v)
|
||||
}
|
||||
|
||||
type urlType struct {
|
||||
t reflect.Type
|
||||
isProto bool
|
||||
}
|
||||
|
||||
func getTypeByUrl(url string) (urlType, error) {
|
||||
func getTypeByUrl(url string) (_ reflect.Type, isProto bool, _ error) {
|
||||
mu.RLock()
|
||||
for t, u := range registry {
|
||||
if u == url {
|
||||
mu.RUnlock()
|
||||
return urlType{
|
||||
t: t,
|
||||
}, nil
|
||||
return t, false, nil
|
||||
}
|
||||
}
|
||||
mu.RUnlock()
|
||||
// fallback to proto registry
|
||||
t := gogoproto.MessageType(url)
|
||||
if t != nil {
|
||||
return urlType{
|
||||
// get the underlying Elem because proto returns a pointer to the type
|
||||
t: t.Elem(),
|
||||
isProto: true,
|
||||
}, nil
|
||||
}
|
||||
mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
|
||||
if err != nil {
|
||||
return urlType{}, fmt.Errorf("type with url %s: %w", url, ErrNotFound)
|
||||
if errors.Is(err, protoregistry.NotFound) {
|
||||
for _, h := range handlers {
|
||||
if t, isProto := h.GetType(url); t != nil {
|
||||
return t, isProto, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, false, fmt.Errorf("type with url %s: %w", url, ErrNotFound)
|
||||
}
|
||||
empty := mt.New().Interface()
|
||||
return urlType{t: reflect.TypeOf(empty).Elem(), isProto: true}, nil
|
||||
return reflect.TypeOf(empty).Elem(), true, nil
|
||||
}
|
||||
|
||||
func tryDereference(v interface{}) reflect.Type {
|
||||
vendor/github.com/containerd/typeurl/v2/types_gogo.go (generated, vendored, new file): 68 lines added
@@ -0,0 +1,68 @@
+//go:build !no_gogo
+
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package typeurl
+
+import (
+	"reflect"
+
+	gogoproto "github.com/gogo/protobuf/proto"
+)
+
+func init() {
+	handlers = append(handlers, gogoHandler{})
+}
+
+type gogoHandler struct{}
+
+func (gogoHandler) Marshaller(v interface{}) func() ([]byte, error) {
+	pm, ok := v.(gogoproto.Message)
+	if !ok {
+		return nil
+	}
+	return func() ([]byte, error) {
+		return gogoproto.Marshal(pm)
+	}
+}
+
+func (gogoHandler) Unmarshaller(v interface{}) func([]byte) error {
+	pm, ok := v.(gogoproto.Message)
+	if !ok {
+		return nil
+	}
+
+	return func(dt []byte) error {
+		return gogoproto.Unmarshal(dt, pm)
+	}
+}
+
+func (gogoHandler) TypeURL(v interface{}) string {
+	pm, ok := v.(gogoproto.Message)
+	if !ok {
+		return ""
+	}
+	return gogoproto.MessageName(pm)
+}
+
+func (gogoHandler) GetType(url string) (reflect.Type, bool) {
+	t := gogoproto.MessageType(url)
+	if t == nil {
+		return nil, false
+	}
+	return t.Elem(), true
+}
vendor/github.com/containers/image/v5/copy/single.go (generated, vendored): 24 lines changed
@@ -109,7 +109,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
 		}
 	}
 
-	if err := checkImageDestinationForCurrentRuntime(ctx, c.options.DestinationCtx, src, c.dest); err != nil {
+	if err := prepareImageConfigForDest(ctx, c.options.DestinationCtx, src, c.dest); err != nil {
 		return copySingleImageResult{}, err
 	}
 
@@ -316,12 +316,15 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
 	return res, nil
 }
 
-// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary.
-func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
+// prepareImageConfigForDest enforces dest.MustMatchRuntimeOS and handles dest.NoteOriginalOCIConfig, if necessary.
+func prepareImageConfigForDest(ctx context.Context, sys *types.SystemContext, src types.Image, dest private.ImageDestination) error {
+	ociConfig, configErr := src.OCIConfig(ctx)
+	// Do not fail on configErr here, this might be an artifact
+	// and maybe nothing needs this to be a container image and to process the config.
+
 	if dest.MustMatchRuntimeOS() {
-		c, err := src.OCIConfig(ctx)
-		if err != nil {
-			return fmt.Errorf("parsing image configuration: %w", err)
+		if configErr != nil {
+			return fmt.Errorf("parsing image configuration: %w", configErr)
 		}
 		wantedPlatforms := platform.WantedPlatforms(sys)
 
@@ -331,7 +334,7 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
 			// For a transitional period, this might trigger warnings because the Variant
 			// field was added to OCI config only recently. If this turns out to be too noisy,
 			// revert this check to only look for (OS, Architecture).
-			if platform.MatchesPlatform(c.Platform, wantedPlatform) {
+			if platform.MatchesPlatform(ociConfig.Platform, wantedPlatform) {
 				match = true
 				break
 			}
@@ -339,9 +342,14 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst
 		}
 		if !match {
 			logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q",
-				c.OS, c.Architecture, c.Variant, strings.Join(options.list, ", "))
+				ociConfig.OS, ociConfig.Architecture, ociConfig.Variant, strings.Join(options.list, ", "))
 		}
 	}
+
+	if err := dest.NoteOriginalOCIConfig(ociConfig, configErr); err != nil {
+		return err
+	}
+
 	return nil
 }
 
vendor/github.com/containers/image/v5/directory/directory_dest.go (generated, vendored): 1 line changed
@@ -29,6 +29,7 @@ var ErrNotContainerImageDir = errors.New("not a containers image directory, don'
 type dirImageDestination struct {
 	impl.Compat
 	impl.PropertyMethodsInitialize
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize
 	stubs.AlwaysSupportsSignatures
 
vendor/github.com/containers/image/v5/docker/daemon/client.go (generated, vendored): 12 lines changed
@@ -3,6 +3,7 @@ package daemon
 import (
 	"net/http"
 	"path/filepath"
+	"time"
 
 	"github.com/containers/image/v5/types"
 	dockerclient "github.com/docker/docker/client"
@@ -47,6 +48,7 @@ func newDockerClient(sys *types.SystemContext) (*dockerclient.Client, error) {
 	}
 	switch serverURL.Scheme {
 	case "unix": // Nothing
+	case "npipe": // Nothing
 	case "http":
 		hc := httpConfig()
 		opts = append(opts, dockerclient.WithHTTPClient(hc))
@@ -82,6 +84,11 @@ func tlsConfig(sys *types.SystemContext) (*http.Client, error) {
 		Transport: &http.Transport{
 			Proxy: http.ProxyFromEnvironment,
 			TLSClientConfig: tlsc,
+			// In general we want to follow docker/daemon/client.defaultHTTPClient , as long as it doesn’t affect compatibility.
+			// These idle connection limits really only apply to long-running clients, which is not our case here;
+			// we include the same values purely for symmetry.
+			MaxIdleConns: 6,
+			IdleConnTimeout: 30 * time.Second,
 		},
 		CheckRedirect: dockerclient.CheckRedirect,
 	}, nil
@@ -92,6 +99,11 @@ func httpConfig() *http.Client {
 		Transport: &http.Transport{
 			Proxy: http.ProxyFromEnvironment,
 			TLSClientConfig: nil,
+			// In general we want to follow docker/daemon/client.defaultHTTPClient , as long as it doesn’t affect compatibility.
+			// These idle connection limits really only apply to long-running clients, which is not our case here;
+			// we include the same values purely for symmetry.
+			MaxIdleConns: 6,
+			IdleConnTimeout: 30 * time.Second,
 		},
 		CheckRedirect: dockerclient.CheckRedirect,
 	}
vendor/github.com/containers/image/v5/docker/distribution_error.go (generated, vendored): 8 lines changed
@@ -24,7 +24,6 @@ import (
 	"slices"
 
 	"github.com/docker/distribution/registry/api/errcode"
-	dockerChallenge "github.com/docker/distribution/registry/client/auth/challenge"
 )
 
 // errNoErrorsInBody is returned when an HTTP response body parses to an empty
@@ -114,10 +113,11 @@ func mergeErrors(err1, err2 error) error {
 // UnexpectedHTTPStatusError returned for response code outside of expected
 // range.
 func handleErrorResponse(resp *http.Response) error {
-	if resp.StatusCode >= 400 && resp.StatusCode < 500 {
+	switch {
+	case resp.StatusCode == http.StatusUnauthorized:
 		// Check for OAuth errors within the `WWW-Authenticate` header first
 		// See https://tools.ietf.org/html/rfc6750#section-3
-		for _, c := range dockerChallenge.ResponseChallenges(resp) {
+		for _, c := range parseAuthHeader(resp.Header) {
 			if c.Scheme == "bearer" {
 				var err errcode.Error
 				// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
@@ -138,6 +138,8 @@ func handleErrorResponse(resp *http.Response) error {
 				return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
 			}
 		}
+		fallthrough
+	case resp.StatusCode >= 400 && resp.StatusCode < 500:
 		err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
 		if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
 			return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
vendor/github.com/containers/image/v5/docker/docker_client.go (generated, vendored): 13 lines changed
@@ -1056,6 +1056,15 @@ func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info ty
 func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) {
 	// Note that this copies all kinds of attachments: attestations, and whatever else is there,
 	// not just signatures. We leave the signature consumers to decide based on the MIME type.
+
+	if err := desc.Digest.Validate(); err != nil { // .Algorithm() might panic without this check
+		return nil, fmt.Errorf("invalid digest %q: %w", desc.Digest.String(), err)
+	}
+	digestAlgorithm := desc.Digest.Algorithm()
+	if !digestAlgorithm.Available() {
+		return nil, fmt.Errorf("invalid digest %q: unsupported digest algorithm %q", desc.Digest.String(), digestAlgorithm.String())
+	}
+
 	reader, _, err := c.getBlob(ctx, ref, manifest.BlobInfoFromOCI1Descriptor(desc), cache)
 	if err != nil {
 		return nil, err
@@ -1065,6 +1074,10 @@ func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerR
 	if err != nil {
 		return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err)
 	}
+	actualDigest := digestAlgorithm.FromBytes(payload)
+	if actualDigest != desc.Digest {
+		return nil, fmt.Errorf("digest mismatch, expected %q, got %q", desc.Digest.String(), actualDigest.String())
+	}
 	return payload, nil
 }
 
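As an aside, the checks added above follow the standard verification pattern of the github.com/opencontainers/go-digest API (Validate, Algorithm, Available, FromBytes). A minimal, self-contained sketch of that pattern (illustrative only, not code from skopeo or containers/image; the function name is hypothetical):

package example

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// verifyPayload checks a blob against the digest taken from an OCI descriptor:
// validate the digest string, make sure its algorithm is usable, then compare.
func verifyPayload(payload []byte, expected digest.Digest) error {
	if err := expected.Validate(); err != nil { // Algorithm() may panic on malformed digests without this check
		return fmt.Errorf("invalid digest %q: %w", expected.String(), err)
	}
	algorithm := expected.Algorithm()
	if !algorithm.Available() {
		return fmt.Errorf("unsupported digest algorithm %q", algorithm.String())
	}
	if actual := algorithm.FromBytes(payload); actual != expected {
		return fmt.Errorf("digest mismatch, expected %q, got %q", expected.String(), actual.String())
	}
	return nil
}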
vendor/github.com/containers/image/v5/docker/docker_image_dest.go (generated, vendored): 1 line changed
@@ -41,6 +41,7 @@ import (
 type dockerImageDestination struct {
 	impl.Compat
 	impl.PropertyMethodsInitialize
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize
 
 	ref dockerReference
vendor/github.com/containers/image/v5/docker/docker_image_src.go (generated, vendored): 4 lines changed
@@ -340,6 +340,10 @@ func handle206Response(streams chan io.ReadCloser, errs chan error, body io.Read
 			}
 			return
 		}
+		if parts >= len(chunks) {
+			errs <- errors.New("too many parts returned by the server")
+			break
+		}
 		s := signalCloseReader{
 			closed: make(chan struct{}),
 			stream: p,
vendor/github.com/containers/image/v5/docker/internal/tarfile/dest.go (generated, vendored): 1 line changed
@@ -24,6 +24,7 @@ import (
 type Destination struct {
 	impl.Compat
 	impl.PropertyMethodsInitialize
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize
 	stubs.NoSignaturesInitialize
 
vendor/github.com/containers/image/v5/docker/registries_d.go (generated, vendored): 6 lines changed
@@ -3,6 +3,7 @@ package docker
 import (
 	"errors"
 	"fmt"
+	"io/fs"
 	"net/url"
 	"os"
 	"path"
@@ -129,6 +130,11 @@ func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
 		configPath := filepath.Join(dirPath, configName)
 		configBytes, err := os.ReadFile(configPath)
 		if err != nil {
+			if errors.Is(err, fs.ErrNotExist) {
+				// file must have been removed between the directory listing
+				// and the open call, ignore that as it is a expected race
+				continue
+			}
 			return nil, err
 		}
 
vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go (generated, vendored, new file): 16 lines added
@@ -0,0 +1,16 @@
+package stubs
+
+import (
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// IgnoresOriginalOCIConfig implements NoteOriginalOCIConfig() that does nothing.
+type IgnoresOriginalOCIConfig struct{}
+
+// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+// The destination can use it in its TryReusingBlob/PutBlob implementations
+// (otherwise it only obtains the final config after all layers are written).
+func (stub IgnoresOriginalOCIConfig) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
+	return nil
+}
vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go (generated, vendored): 1 line changed
@@ -14,6 +14,7 @@ import (
 // wrapped provides the private.ImageDestination operations
 // for a destination that only implements types.ImageDestination
 type wrapped struct {
+	stubs.IgnoresOriginalOCIConfig
 	stubs.NoPutBlobPartialInitialize
 
 	types.ImageDestination
vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go (generated, vendored): 32 lines changed
@ -74,20 +74,20 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat
|
||||
|
||||
// UpdateInstances updates the sizes, digests, and media types of the manifests
|
||||
// which the list catalogs.
|
||||
func (index *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
|
||||
func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
|
||||
editInstances := []ListEdit{}
|
||||
for i, instance := range updates {
|
||||
editInstances = append(editInstances, ListEdit{
|
||||
UpdateOldDigest: index.Manifests[i].Digest,
|
||||
UpdateOldDigest: list.Manifests[i].Digest,
|
||||
UpdateDigest: instance.Digest,
|
||||
UpdateSize: instance.Size,
|
||||
UpdateMediaType: instance.MediaType,
|
||||
ListOperation: ListOpUpdate})
|
||||
}
|
||||
return index.editInstances(editInstances)
|
||||
return list.editInstances(editInstances)
|
||||
}
|
||||
|
||||
func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
|
||||
func (list *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
|
||||
addedEntries := []Schema2ManifestDescriptor{}
|
||||
for i, editInstance := range editInstances {
|
||||
switch editInstance.ListOperation {
|
||||
@ -98,21 +98,21 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
|
||||
if err := editInstance.UpdateDigest.Validate(); err != nil {
|
||||
return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err)
|
||||
}
|
||||
targetIndex := slices.IndexFunc(index.Manifests, func(m Schema2ManifestDescriptor) bool {
|
||||
targetIndex := slices.IndexFunc(list.Manifests, func(m Schema2ManifestDescriptor) bool {
|
||||
return m.Digest == editInstance.UpdateOldDigest
|
||||
})
|
||||
if targetIndex == -1 {
|
||||
return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest)
|
||||
}
|
||||
index.Manifests[targetIndex].Digest = editInstance.UpdateDigest
|
||||
list.Manifests[targetIndex].Digest = editInstance.UpdateDigest
|
||||
if editInstance.UpdateSize < 0 {
|
||||
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize)
|
||||
}
|
||||
index.Manifests[targetIndex].Size = editInstance.UpdateSize
|
||||
list.Manifests[targetIndex].Size = editInstance.UpdateSize
|
||||
if editInstance.UpdateMediaType == "" {
|
||||
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType)
|
||||
return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), list.Manifests[i].MediaType)
|
||||
}
|
||||
index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
|
||||
list.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType
|
||||
case ListOpAdd:
|
||||
if editInstance.AddPlatform == nil {
|
||||
// Should we create a struct with empty fields instead?
|
||||
@ -135,13 +135,13 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error {
|
||||
if len(addedEntries) != 0 {
|
||||
// slices.Clone() here to ensure a private backing array;
|
||||
// an external caller could have manually created Schema2ListPublic with a slice with extra capacity.
|
||||
index.Manifests = append(slices.Clone(index.Manifests), addedEntries...)
|
||||
list.Manifests = append(slices.Clone(list.Manifests), addedEntries...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (index *Schema2List) EditInstances(editInstances []ListEdit) error {
|
||||
return index.editInstances(editInstances)
|
||||
func (list *Schema2List) EditInstances(editInstances []ListEdit) error {
|
||||
return list.editInstances(editInstances)
|
||||
}
|
||||
|
||||
func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
|
||||
@ -280,12 +280,12 @@ func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List {
|
||||
return &Schema2List{*public}
|
||||
}
|
||||
|
||||
func (index *Schema2List) CloneInternal() List {
|
||||
return schema2ListFromPublic(Schema2ListPublicClone(&index.Schema2ListPublic))
|
||||
func (list *Schema2List) CloneInternal() List {
|
||||
return schema2ListFromPublic(Schema2ListPublicClone(&list.Schema2ListPublic))
|
||||
}
|
||||
|
||||
func (index *Schema2List) Clone() ListPublic {
|
||||
return index.CloneInternal()
|
||||
func (list *Schema2List) Clone() ListPublic {
|
||||
return list.CloneInternal()
|
||||
}
|
||||
|
||||
// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
|
||||
vendor/github.com/containers/image/v5/internal/private/private.go (generated, vendored): 7 lines changed
@@ -10,6 +10,7 @@ import (
 	compression "github.com/containers/image/v5/pkg/compression/types"
 	"github.com/containers/image/v5/types"
 	"github.com/opencontainers/go-digest"
+	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
 // ImageSourceInternalOnly is the part of private.ImageSource that is not
@@ -41,6 +42,12 @@ type ImageDestinationInternalOnly interface {
 	// FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures
 	// on unsupported formats.
 
+	// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
+	// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
+	// The destination can use it in its TryReusingBlob/PutBlob implementations
+	// (otherwise it only obtains the final config after all layers are written).
+	NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error
+
 	// PutBlobWithOptions writes contents of stream and returns data representing the result.
 	// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
 	// inputInfo.Size is the expected length of stream, if known.
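As an aside, a destination type can satisfy the new NoteOriginalOCIConfig interface method either by embedding the IgnoresOriginalOCIConfig stub added in this update or by recording the config for later use in TryReusingBlob/PutBlob. A minimal sketch (illustrative only; the stubs package is internal, so this only compiles inside containers/image, and the type names below are hypothetical):

package example

import (
	"github.com/containers/image/v5/internal/imagedestination/stubs"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// ignoringDest does not need the original config, so it embeds the no-op stub.
type ignoringDest struct {
	stubs.IgnoresOriginalOCIConfig
	// ... other embedded stubs and fields ...
}

// recordingDest keeps the config (or the error) around for later blob handling.
type recordingDest struct {
	ociConfig *imgspecv1.Image
	configErr error
}

func (d *recordingDest) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
	d.ociConfig = ociConfig
	d.configErr = configErr
	return nil
}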
vendor/github.com/containers/image/v5/internal/reflink/reflink_linux.go (generated, vendored, new file): 22 lines added
@@ -0,0 +1,22 @@
+//go:build linux
+
+package reflink
+
+import (
+	"io"
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// LinkOrCopy attempts to reflink the source to the destination fd.
+// If reflinking fails or is unsupported, it falls back to io.Copy().
+func LinkOrCopy(src, dst *os.File) error {
+	_, _, errno := unix.Syscall(unix.SYS_IOCTL, dst.Fd(), unix.FICLONE, src.Fd())
+	if errno == 0 {
+		return nil
+	}
+
+	_, err := io.Copy(dst, src)
+	return err
+}
vendor/github.com/containers/image/v5/internal/reflink/reflink_unsupported.go (generated, vendored, new file): 15 lines added
@@ -0,0 +1,15 @@
+//go:build !linux
+
+package reflink
+
+import (
+	"io"
+	"os"
+)
+
+// LinkOrCopy attempts to reflink the source to the destination fd.
+// If reflinking fails or is unsupported, it falls back to io.Copy().
+func LinkOrCopy(src, dst *os.File) error {
+	_, err := io.Copy(dst, src)
+	return err
+}
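As an aside, a caller inside containers/image could use the new reflink helper roughly as below; LinkOrCopy reflinks on Linux filesystems that support FICLONE and silently falls back to a plain byte copy otherwise. The sketch is illustrative only (the reflink package is internal, and copyFile is a hypothetical helper):

package example

import (
	"os"

	"github.com/containers/image/v5/internal/reflink"
)

// copyFile clones srcPath into dstPath, sharing extents when possible.
func copyFile(srcPath, dstPath string) error {
	src, err := os.Open(srcPath)
	if err != nil {
		return err
	}
	defer src.Close()

	dst, err := os.Create(dstPath)
	if err != nil {
		return err
	}
	defer dst.Close()

	return reflink.LinkOrCopy(src, dst)
}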
vendor/github.com/containers/image/v5/oci/archive/oci_dest.go (generated, vendored): 9 lines changed
@@ -14,6 +14,7 @@ import (
	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/idtools"
	digest "github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/sirupsen/logrus"
)

@@ -103,6 +104,14 @@ func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool {
	return d.unpackedDest.SupportsPutBlobPartial()
}

// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
// The destination can use it in its TryReusingBlob/PutBlob implementations
// (otherwise it only obtains the final config after all layers are written).
func (d *ociArchiveImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
	return d.unpackedDest.NoteOriginalOCIConfig(ociConfig, configErr)
}

// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
31
vendor/github.com/containers/image/v5/oci/internal/oci_util.go
generated
vendored
@@ -6,6 +6,7 @@ import (
	"path/filepath"
	"regexp"
	"runtime"
	"strconv"
	"strings"
)

@@ -98,7 +99,7 @@ func ValidateScope(scope string) error {
}

func validateScopeWindows(scope string) error {
	matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope))
	matched, _ := regexp.MatchString(`^[a-zA-Z]:\\`, scope)
	if !matched {
		return fmt.Errorf("Invalid scope '%s'. Must be an absolute path", scope)
	}
@@ -119,3 +120,31 @@ func validateScopeNonWindows(scope string) error {

	return nil
}

// parseOCIReferenceName parses the image from the oci reference.
func parseOCIReferenceName(image string) (img string, index int, err error) {
	index = -1
	if strings.HasPrefix(image, "@") {
		idx, err := strconv.Atoi(image[1:])
		if err != nil {
			return "", index, fmt.Errorf("Invalid source index @%s: not an integer: %w", image[1:], err)
		}
		if idx < 0 {
			return "", index, fmt.Errorf("Invalid source index @%d: must not be negative", idx)
		}
		index = idx
	} else {
		img = image
	}
	return img, index, nil
}

// ParseReferenceIntoElements splits the oci reference into location, image name and source index if exists
func ParseReferenceIntoElements(reference string) (string, string, int, error) {
	dir, image := SplitPathAndImage(reference)
	image, index, err := parseOCIReferenceName(image)
	if err != nil {
		return "", "", -1, err
	}
	return dir, image, index, nil
}
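To make the new reference grammar concrete, here is a hedged standalone sketch (a hypothetical helper, not the vendored API) of how the image part of an oci: reference splits into either a name or a zero-based source index:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// splitImageOrIndex mirrors the parsing above: "@N" selects a manifest by
// position in index.json, anything else is treated as an image name annotation.
func splitImageOrIndex(image string) (name string, index int, err error) {
	if !strings.HasPrefix(image, "@") {
		return image, -1, nil
	}
	idx, err := strconv.Atoi(image[1:])
	if err != nil || idx < 0 {
		return "", -1, fmt.Errorf("invalid source index %q", image)
	}
	return "", idx, nil
}

func main() {
	for _, s := range []string{"latest", "@0", "@2", "@-1"} {
		name, idx, err := splitImageOrIndex(s)
		fmt.Printf("%-5s -> name=%q index=%d err=%v\n", s, name, idx, err)
	}
}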
97
vendor/github.com/containers/image/v5/oci/layout/oci_dest.go
generated
vendored
@@ -17,6 +17,7 @@ import (
	"github.com/containers/image/v5/internal/manifest"
	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/internal/putblobdigest"
	"github.com/containers/image/v5/internal/reflink"
	"github.com/containers/image/v5/types"
	"github.com/containers/storage/pkg/fileutils"
	digest "github.com/opencontainers/go-digest"
@@ -27,6 +28,7 @@ import (
type ociImageDestination struct {
	impl.Compat
	impl.PropertyMethodsInitialize
	stubs.IgnoresOriginalOCIConfig
	stubs.NoPutBlobPartialInitialize
	stubs.NoSignaturesInitialize

@@ -37,6 +39,9 @@ type ociImageDestination struct {

// newImageDestination returns an ImageDestination for writing to an existing directory.
func newImageDestination(sys *types.SystemContext, ref ociReference) (private.ImageDestination, error) {
	if ref.sourceIndex != -1 {
		return nil, fmt.Errorf("Destination reference must not contain a manifest index @%d", ref.sourceIndex)
	}
	var index *imgspecv1.Index
	if indexExists(ref) {
		var err error
@@ -137,9 +142,21 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
	if inputInfo.Size != -1 && size != inputInfo.Size {
		return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
	}
	if err := blobFile.Sync(); err != nil {

	if err := d.blobFileSyncAndRename(blobFile, blobDigest, &explicitClosed); err != nil {
		return private.UploadedBlob{}, err
	}
	succeeded = true
	return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
}

// blobFileSyncAndRename syncs the specified blobFile on the filesystem and renames it to the
// specific blob path determined by the blobDigest. The closed pointer indicates to the caller
// whether blobFile has been closed or not.
func (d *ociImageDestination) blobFileSyncAndRename(blobFile *os.File, blobDigest digest.Digest, closed *bool) error {
	if err := blobFile.Sync(); err != nil {
		return err
	}

	// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
	// On Windows, the “permissions of newly created files” argument to syscall.Open is
@@ -147,26 +164,27 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
	// always fails on Windows.
	if runtime.GOOS != "windows" {
		if err := blobFile.Chmod(0644); err != nil {
			return private.UploadedBlob{}, err
			return err
		}
	}

	blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir)
	if err != nil {
		return private.UploadedBlob{}, err
		return err
	}
	if err := ensureParentDirectoryExists(blobPath); err != nil {
		return private.UploadedBlob{}, err
		return err
	}

	// need to explicitly close the file, since a rename won't otherwise not work on Windows
	// need to explicitly close the file, since a rename won't otherwise work on Windows
	blobFile.Close()
	explicitClosed = true
	*closed = true

	if err := os.Rename(blobFile.Name(), blobPath); err != nil {
		return private.UploadedBlob{}, err
		return err
	}
	succeeded = true
	return private.UploadedBlob{Digest: blobDigest, Size: size}, nil

	return nil
}

// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
@@ -299,6 +317,67 @@ func (d *ociImageDestination) CommitWithOptions(ctx context.Context, options pri
	return os.WriteFile(d.ref.indexPath(), indexJSON, 0644)
}

// PutBlobFromLocalFileOption is unused but may receive functionality in the future.
type PutBlobFromLocalFileOption struct{}

// PutBlobFromLocalFile arranges the data from path to be used as blob with digest.
// It computes, and returns, the digest and size of the used file.
//
// This function can be used instead of dest.PutBlob() where the ImageDestination requires PutBlob() to be called.
func PutBlobFromLocalFile(ctx context.Context, dest types.ImageDestination, file string, options ...PutBlobFromLocalFileOption) (digest.Digest, int64, error) {
	d, ok := dest.(*ociImageDestination)
	if !ok {
		return "", -1, errors.New("internal error: PutBlobFromLocalFile called with a non-oci: destination")
	}

	succeeded := false
	blobFileClosed := false
	blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
	if err != nil {
		return "", -1, err
	}
	defer func() {
		if !blobFileClosed {
			blobFile.Close()
		}
		if !succeeded {
			os.Remove(blobFile.Name())
		}
	}()

	srcFile, err := os.Open(file)
	if err != nil {
		return "", -1, err
	}
	defer srcFile.Close()

	err = reflink.LinkOrCopy(srcFile, blobFile)
	if err != nil {
		return "", -1, err
	}

	_, err = blobFile.Seek(0, io.SeekStart)
	if err != nil {
		return "", -1, err
	}
	blobDigest, err := digest.FromReader(blobFile)
	if err != nil {
		return "", -1, err
	}

	fileInfo, err := blobFile.Stat()
	if err != nil {
		return "", -1, err
	}

	if err := d.blobFileSyncAndRename(blobFile, blobDigest, &blobFileClosed); err != nil {
		return "", -1, err
	}

	succeeded = true
	return blobDigest, fileInfo.Size(), nil
}

func ensureDirectoryExists(path string) error {
	if err := fileutils.Exists(path); err != nil && errors.Is(err, fs.ErrNotExist) {
		if err := os.MkdirAll(path, 0755); err != nil {
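As a rough illustration of how a caller might use the new public helper above, here is a hedged sketch; the layout directory, tag, and file path are hypothetical, and a real caller would still need to write the config and manifest and call Commit on the destination:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containers/image/v5/oci/layout"
)

func main() {
	ctx := context.Background()

	// An oci: layout destination (directory and tag are made up for the example).
	ref, err := layout.NewReference("/tmp/oci-layout", "latest")
	if err != nil {
		log.Fatal(err)
	}
	dest, err := ref.NewImageDestination(ctx, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dest.Close()

	// Reflink or copy a local file into blobs/ and get back its digest and size.
	dgst, size, err := layout.PutBlobFromLocalFile(ctx, dest, "/tmp/layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dgst, size)
}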
75
vendor/github.com/containers/image/v5/oci/layout/oci_transport.go
generated
vendored
@@ -61,22 +61,31 @@ type ociReference struct {
	// (But in general, we make no attempt to be completely safe against concurrent hostile filesystem modifications.)
	dir string // As specified by the user. May be relative, contain symlinks, etc.
	resolvedDir string // Absolute path with no symlinks, at least at the time of its creation. Primarily used for policy namespaces.
	// If image=="", it means the "only image" in the index.json is used in the case it is a source
	// for destinations, the image name annotation "image.ref.name" is not added to the index.json
	// If image=="" && sourceIndex==-1, it means the "only image" in the index.json is used in the case it is a source
	// for destinations, the image name annotation "image.ref.name" is not added to the index.json.
	//
	// Must not be set if sourceIndex is set (the value is not -1).
	image string
	// If not -1, a zero-based index of an image in the manifest index. Valid only for sources.
	// Must not be set if image is set.
	sourceIndex int
}

// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference.
func ParseReference(reference string) (types.ImageReference, error) {
	dir, image := internal.SplitPathAndImage(reference)
	return NewReference(dir, image)
	dir, image, index, err := internal.ParseReferenceIntoElements(reference)
	if err != nil {
		return nil, err
	}
	return newReference(dir, image, index)
}

// NewReference returns an OCI reference for a directory and a image.
// newReference returns an OCI reference for a directory, and an image name annotation or sourceIndex.
//
// If sourceIndex==-1, the index will not be valid to point out the source image, only image will be used.
// We do not expose an API supplying the resolvedDir; we could, but recomputing it
// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
func NewReference(dir, image string) (types.ImageReference, error) {
func newReference(dir, image string, sourceIndex int) (types.ImageReference, error) {
	resolved, err := explicitfilepath.ResolvePathToFullyExplicit(dir)
	if err != nil {
		return nil, err
@@ -90,7 +99,26 @@ func NewReference(dir, image string) (types.ImageReference, error) {
		return nil, err
	}

	return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil
	if sourceIndex != -1 && sourceIndex < 0 {
		return nil, fmt.Errorf("Invalid oci: layout reference: index @%d must not be negative", sourceIndex)
	}
	if sourceIndex != -1 && image != "" {
		return nil, fmt.Errorf("Invalid oci: layout reference: cannot use both an image %s and a source index @%d", image, sourceIndex)
	}
	return ociReference{dir: dir, resolvedDir: resolved, image: image, sourceIndex: sourceIndex}, nil
}

// NewIndexReference returns an OCI reference for a path and a zero-based source manifest index.
func NewIndexReference(dir string, sourceIndex int) (types.ImageReference, error) {
	return newReference(dir, "", sourceIndex)
}

// NewReference returns an OCI reference for a directory and a image.
//
// We do not expose an API supplying the resolvedDir; we could, but recomputing it
// is generally cheap enough that we prefer being confident about the properties of resolvedDir.
func NewReference(dir, image string) (types.ImageReference, error) {
	return newReference(dir, image, -1)
}

func (ref ociReference) Transport() types.ImageTransport {
@@ -103,7 +131,10 @@ func (ref ociReference) Transport() types.ImageTransport {
// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
func (ref ociReference) StringWithinTransport() string {
	return fmt.Sprintf("%s:%s", ref.dir, ref.image)
	if ref.sourceIndex == -1 {
		return fmt.Sprintf("%s:%s", ref.dir, ref.image)
	}
	return fmt.Sprintf("%s:@%d", ref.dir, ref.sourceIndex)
}

// DockerReference returns a Docker reference associated with this reference
@@ -187,14 +218,18 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, erro
		return imgspecv1.Descriptor{}, -1, err
	}

	if ref.image == "" {
		// return manifest if only one image is in the oci directory
		if len(index.Manifests) != 1 {
			// ask user to choose image when more than one image in the oci directory
			return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage
	switch {
	case ref.image != "" && ref.sourceIndex != -1: // Coverage: newReference refuses to create such references.
		return imgspecv1.Descriptor{}, -1, fmt.Errorf("Internal error: Cannot have both ref %s and source index @%d",
			ref.image, ref.sourceIndex)

	case ref.sourceIndex != -1:
		if ref.sourceIndex >= len(index.Manifests) {
			return imgspecv1.Descriptor{}, -1, fmt.Errorf("index %d is too large, only %d entries available", ref.sourceIndex, len(index.Manifests))
		}
		return index.Manifests[0], 0, nil
	} else {
		return index.Manifests[ref.sourceIndex], ref.sourceIndex, nil

	case ref.image != "":
		// if image specified, look through all manifests for a match
		var unsupportedMIMETypes []string
		for i, md := range index.Manifests {
@@ -208,8 +243,16 @@ func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, int, erro
		if len(unsupportedMIMETypes) != 0 {
			return imgspecv1.Descriptor{}, -1, fmt.Errorf("reference %q matches unsupported manifest MIME types %q", ref.image, unsupportedMIMETypes)
		}
		return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref}

	default:
		// return manifest if only one image is in the oci directory
		if len(index.Manifests) != 1 {
			// ask user to choose image when more than one image in the oci directory
			return imgspecv1.Descriptor{}, -1, ErrMoreThanOneImage
		}
		return index.Manifests[0], 0, nil
	}
	return imgspecv1.Descriptor{}, -1, ImageNotFoundError{ref}
}

// LoadManifestDescriptor loads the manifest descriptor to be used to retrieve the image name
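A hedged usage sketch of the new source-index form of oci: layout references introduced above; the directory path is hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/containers/image/v5/oci/layout"
)

func main() {
	// Select the second manifest in /tmp/oci-layout/index.json by position.
	byIndex, err := layout.NewIndexReference("/tmp/oci-layout", 1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(byIndex.StringWithinTransport()) // expected to print something like "/tmp/oci-layout:@1"

	// The same selection can be written textually and parsed back.
	parsed, err := layout.ParseReference("/tmp/oci-layout:@1")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(parsed.StringWithinTransport())
}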
52
vendor/github.com/containers/image/v5/oci/layout/reader.go
generated
vendored
Normal file
@@ -0,0 +1,52 @@
package layout

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"

	"github.com/containers/image/v5/types"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// This file is named reader.go for consistency with other transports’
// handling of “image containers”, but we don’t actually need a stateful reader object.

// ListResult wraps the image reference and the manifest for loading
type ListResult struct {
	Reference types.ImageReference
	ManifestDescriptor imgspecv1.Descriptor
}

// List returns a slice of manifests included in the archive
func List(dir string) ([]ListResult, error) {
	var res []ListResult

	indexJSON, err := os.ReadFile(filepath.Join(dir, imgspecv1.ImageIndexFile))
	if err != nil {
		return nil, err
	}
	var index imgspecv1.Index
	if err := json.Unmarshal(indexJSON, &index); err != nil {
		return nil, err
	}

	for manifestIndex, md := range index.Manifests {
		refName := md.Annotations[imgspecv1.AnnotationRefName]
		index := -1
		if refName == "" {
			index = manifestIndex
		}
		ref, err := newReference(dir, refName, index)
		if err != nil {
			return nil, fmt.Errorf("error creating image reference: %w", err)
		}
		reference := ListResult{
			Reference: ref,
			ManifestDescriptor: md,
		}
		res = append(res, reference)
	}
	return res, nil
}
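A hedged sketch of enumerating an OCI layout with the new List reader shown above; the layout directory is hypothetical:

package main

import (
	"fmt"
	"log"

	"github.com/containers/image/v5/oci/layout"
)

func main() {
	results, err := layout.List("/tmp/oci-layout")
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range results {
		// Each entry pairs a usable ImageReference with its index.json descriptor.
		fmt.Printf("%s  %s  %s\n",
			r.ManifestDescriptor.Digest,
			r.ManifestDescriptor.MediaType,
			r.Reference.StringWithinTransport())
	}
}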
9
vendor/github.com/containers/image/v5/openshift/openshift_dest.go
generated
vendored
@@ -22,6 +22,7 @@ import (
	"github.com/containers/image/v5/manifest"
	"github.com/containers/image/v5/types"
	"github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

type openshiftImageDestination struct {
@@ -111,6 +112,14 @@ func (d *openshiftImageDestination) SupportsPutBlobPartial() bool {
	return d.docker.SupportsPutBlobPartial()
}

// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
// The destination can use it in its TryReusingBlob/PutBlob implementations
// (otherwise it only obtains the final config after all layers are written).
func (d *openshiftImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
	return d.docker.NoteOriginalOCIConfig(ociConfig, configErr)
}

// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
6
vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go
generated
vendored
@@ -1,6 +1,7 @@
package sysregistriesv2

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
@@ -744,6 +745,11 @@ func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedC
		// Enforce v2 format for drop-in-configs.
		dropIn, err := loadConfigFile(path, true)
		if err != nil {
			if errors.Is(err, fs.ErrNotExist) {
				// file must have been removed between the directory listing
				// and the open call, ignore that as it is a expected race
				continue
			}
			return nil, fmt.Errorf("loading drop-in registries configuration %q: %w", path, err)
		}
		config.updateWithConfigurationFrom(dropIn)
10
vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
generated
vendored
@@ -3,6 +3,7 @@ package tlsclientconfig
import (
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"net"
	"net/http"
@@ -36,12 +37,9 @@ func SetupCertificates(dir string, tlsc *tls.Config) error {
			logrus.Debugf(" crt: %s", fullPath)
			data, err := os.ReadFile(fullPath)
			if err != nil {
				if os.IsNotExist(err) {
					// Dangling symbolic link?
					// Race with someone who deleted the
					// file after we read the directory's
					// list of contents?
					logrus.Warnf("error reading certificate %q: %v", fullPath, err)
				if errors.Is(err, os.ErrNotExist) {
					// file must have been removed between the directory listing
					// and the open call, ignore that as it is a expected race
					continue
				}
				return err
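Both of the hunks above converge on the same pattern: tolerate a file vanishing between the directory listing and the open call. A hedged standalone sketch of that pattern (the directory path is only an example):

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"log"
	"os"
	"path/filepath"
)

func main() {
	dir := "/etc/containers/registries.conf.d"
	entries, err := os.ReadDir(dir)
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		data, err := os.ReadFile(filepath.Join(dir, e.Name()))
		if err != nil {
			if errors.Is(err, fs.ErrNotExist) {
				// Removed between the directory listing and the open call:
				// an expected race, not a fatal error.
				continue
			}
			log.Fatal(err)
		}
		fmt.Println(e.Name(), len(data), "bytes")
	}
}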
2
vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go
generated
vendored
@@ -20,7 +20,7 @@ func (f *fulcioTrustRoot) validate() error {
	return errors.New("fulcio disabled at compile-time")
}

func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
func verifyRekorFulcio(rekorPublicKeys []*ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
	untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string,
	untrustedPayloadBytes []byte) (crypto.PublicKey, error) {
	return nil, errors.New("fulcio disabled at compile-time")
9
vendor/github.com/containers/image/v5/signature/internal/errors.go
generated
vendored
@@ -13,3 +13,12 @@ func (err InvalidSignatureError) Error() string {
func NewInvalidSignatureError(msg string) InvalidSignatureError {
	return InvalidSignatureError{msg: msg}
}

// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError.
// All other errors are returned as is.
func JSONFormatToInvalidSignatureError(err error) error {
	if formatErr, ok := err.(JSONFormatError); ok {
		err = NewInvalidSignatureError(formatErr.Error())
	}
	return err
}
9
vendor/github.com/containers/image/v5/signature/internal/rekor_set.go
generated
vendored
@@ -40,15 +40,6 @@ type UntrustedRekorPayload struct {
// A compile-time check that UntrustedRekorSET implements json.Unmarshaler
var _ json.Unmarshaler = (*UntrustedRekorSET)(nil)

// JSONFormatToInvalidSignatureError converts JSONFormatError to InvalidSignatureError.
// All other errors are returned as is.
func JSONFormatToInvalidSignatureError(err error) error {
	if formatErr, ok := err.(JSONFormatError); ok {
		err = NewInvalidSignatureError(formatErr.Error())
	}
	return err
}

// UnmarshalJSON implements the json.Unmarshaler interface
func (s *UntrustedRekorSET) UnmarshalJSON(data []byte) error {
	return JSONFormatToInvalidSignatureError(s.strictUnmarshalJSON(data))
2
vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go
generated
vendored
@@ -10,6 +10,6 @@ import (

// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data.
// Returns bundle upload time on success.
func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
func VerifyRekorSET(publicKeys []*ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
	return time.Time{}, NewInvalidSignatureError("rekor disabled at compile-time")
}
580
vendor/github.com/containers/image/v5/storage/storage_dest.go
generated
vendored
@@ -17,11 +17,13 @@ import (
	"sync/atomic"

	"github.com/containers/image/v5/docker/reference"
	"github.com/containers/image/v5/internal/image"
	"github.com/containers/image/v5/internal/imagedestination/impl"
	"github.com/containers/image/v5/internal/imagedestination/stubs"
	srcImpl "github.com/containers/image/v5/internal/imagesource/impl"
	srcStubs "github.com/containers/image/v5/internal/imagesource/stubs"
	"github.com/containers/image/v5/internal/private"
	"github.com/containers/image/v5/internal/putblobdigest"
	"github.com/containers/image/v5/internal/set"
	"github.com/containers/image/v5/internal/signature"
	"github.com/containers/image/v5/internal/tmpdir"
	"github.com/containers/image/v5/manifest"
@@ -31,6 +33,7 @@ import (
	graphdriver "github.com/containers/storage/drivers"
	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/chunked"
	"github.com/containers/storage/pkg/chunked/toc"
	"github.com/containers/storage/pkg/ioutils"
	digest "github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -57,8 +60,9 @@ type storageImageDestination struct {
	imageRef storageReference
	directory string // Temporary directory where we store blobs until Commit() time
	nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs
	manifest []byte // Manifest contents, temporary
	manifestDigest digest.Digest // Valid if len(manifest) != 0
	manifest []byte // (Per-instance) manifest contents, or nil if not yet known.
	manifestMIMEType string // Valid if manifest != nil
	manifestDigest digest.Digest // Valid if manifest != nil
	untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs (not even validated to be valid digest.Digest!); or nil if not read yet
	signatures []byte // Signature contents, temporary
	signatureses map[digest.Digest][]byte // Instance signature contents, temporary
@@ -108,8 +112,10 @@ type storageImageDestinationLockProtected struct {
	//
	// Ideally we wouldn’t have blobDiffIDs, and we would just keep records by index, but the public API does not require the caller
	// to provide layer indices; and configs don’t have layer indices. blobDiffIDs needs to exist for those cases.
	indexToDiffID map[int]digest.Digest // Mapping from layer index to DiffID
	indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest
	indexToDiffID map[int]digest.Digest // Mapping from layer index to DiffID
	// Mapping from layer index to a TOC Digest.
	// If this is set, then either c/storage/pkg/chunked/toc.GetTOCDigest must have returned a value, or indexToDiffID must be set as well.
	indexToTOCDigest map[int]digest.Digest
	blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs. CAREFUL: See the WARNING above.

	// Layer data: Before commitLayer is called, either at least one of (diffOutputs, indexToAdditionalLayer, filenames)
@@ -121,6 +127,9 @@ type storageImageDestinationLockProtected struct {
	filenames map[digest.Digest]string
	// Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set.
	fileSizes map[digest.Digest]int64

	// Config
	configDigest digest.Digest // "" if N/A or not known yet.
}

// addedLayerInfo records data about a layer to use in this image.
@@ -201,6 +210,18 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
	return filepath.Join(s.directory, fmt.Sprintf("%d", s.nextTempFileID.Add(1)))
}

// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format,
// or an error obtaining that value (e.g. if the image is an artifact and not a container image).
// The destination can use it in its TryReusingBlob/PutBlob implementations
// (otherwise it only obtains the final config after all layers are written).
func (s *storageImageDestination) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error {
	if configErr != nil {
		return fmt.Errorf("writing to c/storage without a valid image config: %w", configErr)
	}
	s.setUntrustedDiffIDValuesFromOCIConfig(ociConfig)
	return nil
}

// PutBlobWithOptions writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
// inputInfo.Size is the expected length of stream, if known.
@@ -214,7 +235,17 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream
		return info, err
	}

	if options.IsConfig || options.LayerIndex == nil {
	if options.IsConfig {
		s.lock.Lock()
		defer s.lock.Unlock()
		if s.lockProtected.configDigest != "" {
			return private.UploadedBlob{}, fmt.Errorf("after config %q, refusing to record another config %q",
				s.lockProtected.configDigest.String(), info.Digest.String())
		}
		s.lockProtected.configDigest = info.Digest
		return info, nil
	}
	if options.LayerIndex == nil {
		return info, nil
	}
@ -315,6 +346,56 @@ func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.Read
|
||||
// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
|
||||
// The fallback _must not_ be done otherwise.
|
||||
func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (_ private.UploadedBlob, retErr error) {
|
||||
inputTOCDigest, err := toc.GetTOCDigest(srcInfo.Annotations)
|
||||
if err != nil {
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
|
||||
// The identity of partially-pulled layers is, as long as we keep compatibility with tar-like consumers,
|
||||
// unfixably ambiguous: there are two possible “views” of the same file (same compressed digest),
|
||||
// the traditional “view” that decompresses the primary stream and consumes a tar file,
|
||||
// and the partial-pull “view” that starts with the TOC.
|
||||
// The two “views” have two separate metadata sets and may refer to different parts of the blob for file contents;
|
||||
// the direct way to ensure they are consistent would be to read the full primary stream (and authenticate it against
|
||||
// the compressed digest), and ensure the metadata and layer contents exactly match the partially-pulled contents -
|
||||
// making the partial pull completely pointless.
|
||||
//
|
||||
// Instead, for partial-pull-capable layers (with inputTOCDigest set), we require the image to “commit”
|
||||
// to uncompressed layer digest values via the config's RootFS.DiffIDs array:
|
||||
// they are already naturally computed for traditionally-pulled layers, and for partially-pulled layers we
|
||||
// do the optimal partial pull, and then reconstruct the uncompressed tar stream just to (expensively) compute this digest.
|
||||
//
|
||||
// Layers which don’t support partial pulls (inputTOCDigest == "", incl. all schema1 layers) can be let through:
|
||||
// the partial pull code will either not engage, or consume the full layer; and the rules of indexToTOCDigest / layerIdentifiedByTOC
|
||||
// ensure the layer is identified by DiffID, i.e. using the traditional “view”.
|
||||
//
|
||||
// But if inputTOCDigest is set and the input image doesn't have RootFS.DiffIDs (the config is invalid for schema2/OCI),
|
||||
// don't allow a partial pull, and fall back to PutBlobWithOptions.
|
||||
//
|
||||
// (The user can opt out of the DiffID commitment checking by a c/storage option, giving up security for performance,
|
||||
// but we will still trigger the fall back here, and we will still enforce a DiffID match, so that the set of accepted images
|
||||
// is the same in both cases, and so that users are not tempted to set the c/storage option to allow accepting some invalid images.)
|
||||
var untrustedDiffID digest.Digest // "" if unknown
|
||||
udid, err := s.untrustedLayerDiffID(options.LayerIndex)
|
||||
if err != nil {
|
||||
var diffIDUnknownErr untrustedLayerDiffIDUnknownError
|
||||
switch {
|
||||
case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
|
||||
// PutBlobPartial is a private API, so all callers are within c/image, and should have called
|
||||
// NoteOriginalOCIConfig first.
|
||||
return private.UploadedBlob{}, fmt.Errorf("internal error: in PutBlobPartial, untrustedLayerDiffID returned errUntrustedLayerDiffIDNotYetAvailable")
|
||||
case errors.As(err, &diffIDUnknownErr):
|
||||
if inputTOCDigest != nil {
|
||||
return private.UploadedBlob{}, private.NewErrFallbackToOrdinaryLayerDownload(err)
|
||||
}
|
||||
untrustedDiffID = "" // A schema1 image or a non-TOC layer with no ambiguity, let it through
|
||||
default:
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
} else {
|
||||
untrustedDiffID = udid
|
||||
}
|
||||
|
||||
fetcher := zstdFetcher{
|
||||
chunkAccessor: chunkAccessor,
|
||||
ctx: ctx,
|
||||
@ -351,35 +432,55 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
|
||||
blobDigest := srcInfo.Digest
|
||||
|
||||
s.lock.Lock()
|
||||
if out.UncompressedDigest != "" {
|
||||
s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest
|
||||
if out.TOCDigest != "" {
|
||||
options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest)
|
||||
}
|
||||
// Don’t set indexToTOCDigest on this path:
|
||||
// - Using UncompressedDigest allows image reuse with non-partially-pulled layers, so we want to set indexToDiffID.
|
||||
// - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch.
|
||||
// That TOC is quite unlikely to match any other TOC value.
|
||||
if err := func() error { // A scope for defer
|
||||
defer s.lock.Unlock()
|
||||
|
||||
// The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is
|
||||
// responsible for ensuring blobDigest has been validated.
|
||||
if out.CompressedDigest != blobDigest {
|
||||
return private.UploadedBlob{}, fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q",
|
||||
out.CompressedDigest, blobDigest)
|
||||
// For true partial pulls, c/storage decides whether to compute the uncompressed digest based on an option in storage.conf
|
||||
// (defaults to true, to avoid ambiguity.)
|
||||
// c/storage can also be configured, to consume a layer not prepared for partial pulls (primarily to allow composefs conversion),
|
||||
// and in that case it always consumes the full blob and always computes the uncompressed digest.
|
||||
if out.UncompressedDigest != "" {
|
||||
// This is centrally enforced later, in commitLayer, but because we have the value available,
|
||||
// we might just as well check immediately.
|
||||
if untrustedDiffID != "" && out.UncompressedDigest != untrustedDiffID {
|
||||
return fmt.Errorf("uncompressed digest of layer %q is %q, config claims %q", srcInfo.Digest.String(),
|
||||
out.UncompressedDigest.String(), untrustedDiffID.String())
|
||||
}
|
||||
|
||||
s.lockProtected.indexToDiffID[options.LayerIndex] = out.UncompressedDigest
|
||||
if out.TOCDigest != "" {
|
||||
s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
|
||||
options.Cache.RecordTOCUncompressedPair(out.TOCDigest, out.UncompressedDigest)
|
||||
}
|
||||
|
||||
// If the whole layer has been consumed, chunked.GetDiffer is responsible for ensuring blobDigest has been validated.
|
||||
if out.CompressedDigest != "" {
|
||||
if out.CompressedDigest != blobDigest {
|
||||
return fmt.Errorf("internal error: PrepareStagedLayer returned CompressedDigest %q not matching expected %q",
|
||||
out.CompressedDigest, blobDigest)
|
||||
}
|
||||
// So, record also information about blobDigest, that might benefit reuse.
|
||||
// We trust PrepareStagedLayer to validate or create both values correctly.
|
||||
s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
|
||||
options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest)
|
||||
}
|
||||
} else {
|
||||
// Sanity-check the defined rules for indexToTOCDigest.
|
||||
if inputTOCDigest == nil {
|
||||
return fmt.Errorf("internal error: PrepareStagedLayer returned a TOC-only identity for layer %q with no TOC digest", srcInfo.Digest.String())
|
||||
}
|
||||
|
||||
// Use diffID for layer identity if it is known.
|
||||
if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" {
|
||||
s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest
|
||||
}
|
||||
s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
|
||||
}
|
||||
// So, record also information about blobDigest, that might benefit reuse.
|
||||
// We trust PrepareStagedLayer to validate or create both values correctly.
|
||||
s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
|
||||
options.Cache.RecordDigestUncompressedPair(out.CompressedDigest, out.UncompressedDigest)
|
||||
} else {
|
||||
// Use diffID for layer identity if it is known.
|
||||
if uncompressedDigest := options.Cache.UncompressedDigestForTOC(out.TOCDigest); uncompressedDigest != "" {
|
||||
s.lockProtected.indexToDiffID[options.LayerIndex] = uncompressedDigest
|
||||
}
|
||||
s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
|
||||
s.lockProtected.diffOutputs[options.LayerIndex] = out
|
||||
return nil
|
||||
}(); err != nil {
|
||||
return private.UploadedBlob{}, err
|
||||
}
|
||||
s.lockProtected.diffOutputs[options.LayerIndex] = out
|
||||
s.lock.Unlock()
|
||||
|
||||
succeeded = true
|
||||
return private.UploadedBlob{
|
||||
@ -417,22 +518,43 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
|
||||
if err := blobDigest.Validate(); err != nil {
|
||||
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
|
||||
}
|
||||
if options.TOCDigest != "" {
|
||||
useTOCDigest := false // If set, (options.TOCDigest != "" && options.LayerIndex != nil) AND we can use options.TOCDigest safely.
|
||||
if options.TOCDigest != "" && options.LayerIndex != nil {
|
||||
if err := options.TOCDigest.Validate(); err != nil {
|
||||
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
|
||||
}
|
||||
// Only consider using TOCDigest if we can avoid ambiguous image “views”, see the detailed comment in PutBlobPartial.
|
||||
_, err := s.untrustedLayerDiffID(*options.LayerIndex)
|
||||
if err != nil {
|
||||
var diffIDUnknownErr untrustedLayerDiffIDUnknownError
|
||||
switch {
|
||||
case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
|
||||
// options.TOCDigest is a private API, so all callers are within c/image, and should have called
|
||||
// NoteOriginalOCIConfig first.
|
||||
return false, private.ReusedBlob{}, fmt.Errorf("internal error: in TryReusingBlobWithOptions, untrustedLayerDiffID returned errUntrustedLayerDiffIDNotYetAvailable")
|
||||
case errors.As(err, &diffIDUnknownErr):
|
||||
logrus.Debugf("Not using TOC %q to look for layer reuse: %v", options.TOCDigest, err)
|
||||
// But don’t abort entirely, keep useTOCDigest = false, try a blobDigest match.
|
||||
default:
|
||||
return false, private.ReusedBlob{}, err
|
||||
}
|
||||
} else {
|
||||
useTOCDigest = true
|
||||
}
|
||||
}
|
||||
|
||||
// lock the entire method as it executes fairly quickly
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
if options.SrcRef != nil && options.TOCDigest != "" && options.LayerIndex != nil {
|
||||
if options.SrcRef != nil && useTOCDigest {
|
||||
// Check if we have the layer in the underlying additional layer store.
|
||||
aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(options.TOCDigest, options.SrcRef.String())
|
||||
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
|
||||
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobDigest, err)
|
||||
} else if err == nil {
|
||||
// Compare the long comment in PutBlobPartial. We assume that the Additional Layer Store will, somehow,
|
||||
// avoid layer “view” ambiguity.
|
||||
alsTOCDigest := aLayer.TOCDigest()
|
||||
if alsTOCDigest != options.TOCDigest {
|
||||
// FIXME: If alsTOCDigest is "", the Additional Layer Store FUSE server is probably just too old, and we could
|
||||
@ -505,13 +627,13 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
|
||||
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
|
||||
}
|
||||
if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
|
||||
s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest
|
||||
s.lockProtected.blobDiffIDs[reused.Digest] = uncompressedDigest
|
||||
return true, reused, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if options.TOCDigest != "" && options.LayerIndex != nil {
|
||||
if useTOCDigest {
|
||||
// Check if we know which which UncompressedDigest the TOC digest resolves to, and we have a match for that.
|
||||
// Prefer this over LayersByTOCDigest because we can identify the layer using UncompressedDigest, maximizing reuse.
|
||||
uncompressedDigest := options.Cache.UncompressedDigestForTOC(options.TOCDigest)
|
||||
@ -532,6 +654,11 @@ func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Dige
|
||||
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, options.TOCDigest, err)
|
||||
}
|
||||
if found, reused := reusedBlobFromLayerLookup(layers, blobDigest, size, options); found {
|
||||
if uncompressedDigest == "" && layers[0].UncompressedDigest != "" {
|
||||
// Determine an uncompressed digest if at all possible, to use a traditional image ID
|
||||
// and to maximize image reuse.
|
||||
uncompressedDigest = layers[0].UncompressedDigest
|
||||
}
|
||||
if uncompressedDigest != "" {
|
||||
s.lockProtected.indexToDiffID[*options.LayerIndex] = uncompressedDigest
|
||||
}
|
||||
@ -568,13 +695,22 @@ func reusedBlobFromLayerLookup(layers []storage.Layer, blobDigest digest.Digest,
|
||||
|
||||
// trustedLayerIdentityData is a _consistent_ set of information known about a single layer.
|
||||
type trustedLayerIdentityData struct {
|
||||
layerIdentifiedByTOC bool // true if we decided the layer should be identified by tocDigest, false if by diffID
|
||||
// true if we decided the layer should be identified by tocDigest, false if by diffID
|
||||
// This can only be true if c/storage/pkg/chunked/toc.GetTOCDigest returns a value.
|
||||
layerIdentifiedByTOC bool
|
||||
|
||||
diffID digest.Digest // A digest of the uncompressed full contents of the layer, or "" if unknown; must be set if !layerIdentifiedByTOC
|
||||
tocDigest digest.Digest // A digest of the TOC digest, or "" if unknown; must be set if layerIdentifiedByTOC
|
||||
blobDigest digest.Digest // A digest of the (possibly-compressed) layer as presented, or "" if unknown/untrusted.
|
||||
}
|
||||
|
||||
// logString() prints a representation of trusted suitable identifying a layer in logs and errors.
|
||||
// The string is already quoted to expose malicious input and does not need to be quoted again.
|
||||
// Note that it does not include _all_ of the contents.
|
||||
func (trusted trustedLayerIdentityData) logString() string {
|
||||
return fmt.Sprintf("%q/%q/%q", trusted.blobDigest, trusted.tocDigest, trusted.diffID)
|
||||
}
|
||||
|
||||
// trustedLayerIdentityDataLocked returns a _consistent_ set of information for a layer with (layerIndex, blobDigest).
|
||||
// blobDigest is the (possibly-compressed) layer digest referenced in the manifest.
|
||||
// It returns (trusted, true) if the layer was found, or (_, false) if insufficient data is available.
|
||||
@ -785,23 +921,6 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
|
||||
return nil
|
||||
}
|
||||
|
||||
// singleLayerIDComponent returns a single layer’s the input to computing a layer (chain) ID,
|
||||
// and an indication whether the input already has the shape of a layer ID.
|
||||
// It returns ("", false) if the layer is not found at all (which should never happen)
|
||||
func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDigest digest.Digest) (string, bool) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
trusted, ok := s.trustedLayerIdentityDataLocked(layerIndex, blobDigest)
|
||||
if !ok {
|
||||
return "", false
|
||||
}
|
||||
if trusted.layerIdentifiedByTOC {
|
||||
return "@TOC=" + trusted.tocDigest.Encoded(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
|
||||
}
|
||||
return trusted.diffID.Encoded(), true // This looks like chain IDs, and it uses the traditional value.
|
||||
}
|
||||
|
||||
// commitLayer commits the specified layer with the given index to the storage.
|
||||
// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
|
||||
//
|
||||
@ -813,16 +932,15 @@ func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDig
|
||||
// must guarantee that, at any given time, at most one goroutine may execute
|
||||
// `commitLayer()`.
|
||||
func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) (bool, error) {
|
||||
// Already committed? Return early.
|
||||
if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Start with an empty string or the previous layer ID. Note that
|
||||
// `s.indexToStorageID` can only be accessed by *one* goroutine at any
|
||||
// given time. Hence, we don't need to lock accesses.
|
||||
var parentLayer string
|
||||
var parentLayer string // "" if no parent
|
||||
if index != 0 {
|
||||
// s.indexToStorageID can only be written by this function, and our caller
|
||||
// is responsible for ensuring it can be only be called by *one* goroutine at any
|
||||
// given time. Hence, we don't need to lock accesses.
|
||||
prev, ok := s.indexToStorageID[index-1]
|
||||
if !ok {
|
||||
return false, fmt.Errorf("Internal error: commitLayer called with previous layer %d not committed yet", index-1)
|
||||
@ -830,18 +948,17 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
|
||||
parentLayer = prev
|
||||
}
|
||||
|
||||
// Carry over the previous ID for empty non-base layers.
|
||||
if info.emptyLayer {
|
||||
s.indexToStorageID[index] = parentLayer
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Check if there's already a layer with the ID that we'd give to the result of applying
|
||||
// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
|
||||
// The layerID refers either to the DiffID or the digest of the TOC.
|
||||
layerIDComponent, layerIDComponentStandalone := s.singleLayerIDComponent(index, info.digest)
|
||||
if layerIDComponent == "" {
|
||||
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob() / TryReusingBlob() / …
|
||||
// Collect trusted parameters of the layer.
|
||||
s.lock.Lock()
|
||||
trusted, ok := s.trustedLayerIdentityDataLocked(index, info.digest)
|
||||
s.lock.Unlock()
|
||||
if !ok {
|
||||
// Check if the layer exists already and the caller just (incorrectly) forgot to pass it to us in a PutBlob() / TryReusingBlob() / …
|
||||
//
|
||||
// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache: a caller
|
||||
// that relies on using a blob digest that has never been seen by the store had better call
|
||||
@ -865,23 +982,54 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
|
||||
return false, fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String())
|
||||
}
|
||||
|
||||
layerIDComponent, layerIDComponentStandalone = s.singleLayerIDComponent(index, info.digest)
|
||||
if layerIDComponent == "" {
|
||||
s.lock.Lock()
|
||||
trusted, ok = s.trustedLayerIdentityDataLocked(index, info.digest)
|
||||
s.lock.Unlock()
|
||||
if !ok {
|
||||
return false, fmt.Errorf("we have blob %q, but don't know its layer ID", info.digest.String())
|
||||
}
|
||||
}
|
||||
|
||||
id := layerIDComponent
|
||||
if !layerIDComponentStandalone || parentLayer != "" {
|
||||
id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Encoded()
|
||||
// Ensure that we always see the same “view” of a layer, as identified by the layer’s uncompressed digest,
|
||||
// unless the user has explicitly opted out of this in storage.conf: see the more detailed explanation in PutBlobPartial.
|
||||
if trusted.diffID != "" {
|
||||
untrustedDiffID, err := s.untrustedLayerDiffID(index)
|
||||
if err != nil {
|
||||
var diffIDUnknownErr untrustedLayerDiffIDUnknownError
|
||||
switch {
|
||||
case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
|
||||
logrus.Debugf("Skipping commit for layer %q, manifest not yet available for DiffID check", index)
|
||||
return true, nil
|
||||
case errors.As(err, &diffIDUnknownErr):
|
||||
// If untrustedLayerDiffIDUnknownError, the input image is schema1, has no TOC annotations,
|
||||
// so we could not have reused a TOC-identified layer nor have done a TOC-identified partial pull,
|
||||
// i.e. there is no other “view” to worry about. Sanity-check that we really see the only expected view.
|
||||
//
|
||||
// Or, maybe, the input image is OCI, and has invalid/missing DiffID values in config. In that case
|
||||
// we _must_ fail if we used a TOC-identified layer - but PutBlobPartial should have already
|
||||
// refused to do a partial pull, so we are in an inconsistent state.
|
||||
if trusted.layerIdentifiedByTOC {
|
||||
return false, fmt.Errorf("internal error: layer %d for blob %s was identified by TOC, but we don't have a DiffID in config",
|
||||
index, trusted.logString())
|
||||
}
|
||||
// else a schema1 image or a non-TOC layer with no ambiguity, let it through
|
||||
default:
|
||||
return false, err
|
||||
}
|
||||
} else if trusted.diffID != untrustedDiffID {
|
||||
return false, fmt.Errorf("layer %d (blob %s) does not match config's DiffID %q", index, trusted.logString(), untrustedDiffID)
|
||||
}
|
||||
}
|
||||
|
||||
id := layerID(parentLayer, trusted)
|
||||
|
||||
if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
|
||||
// There's already a layer that should have the right contents, just reuse it.
|
||||
s.indexToStorageID[index] = layer.ID
|
||||
return false, nil
|
||||
}
|
||||
|
||||
layer, err := s.createNewLayer(index, info.digest, parentLayer, id)
|
||||
layer, err := s.createNewLayer(index, trusted, parentLayer, id)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -892,32 +1040,62 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// createNewLayer creates a new layer newLayerID for (index, layerDigest) on top of parentLayer (which may be "").
|
||||
// layerID computes a layer (“chain”) ID for (a possibly-empty parentID, trusted)
|
||||
func layerID(parentID string, trusted trustedLayerIdentityData) string {
|
||||
var component string
|
||||
mustHash := false
|
||||
if trusted.layerIdentifiedByTOC {
|
||||
// "@" is not a valid start of a digest.Digest.Encoded(), so this is unambiguous with the !layerIdentifiedByTOC case.
|
||||
// But we _must_ hash this below to get a Digest.Encoded()-formatted value.
|
||||
component = "@TOC=" + trusted.tocDigest.Encoded()
|
||||
mustHash = true
|
||||
} else {
|
||||
component = trusted.diffID.Encoded() // This looks like chain IDs, and it uses the traditional value.
|
||||
}
|
||||
|
||||
if parentID == "" && !mustHash {
|
||||
return component
|
||||
}
|
||||
return digest.Canonical.FromString(parentID + "+" + component).Encoded()
|
||||
}
|
||||
|
||||
// createNewLayer creates a new layer newLayerID for (index, trusted) on top of parentLayer (which may be "").
|
||||
// If the layer cannot be committed yet, the function returns (nil, nil).
|
||||
func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.Digest, parentLayer, newLayerID string) (*storage.Layer, error) {
|
||||
func (s *storageImageDestination) createNewLayer(index int, trusted trustedLayerIdentityData, parentLayer, newLayerID string) (*storage.Layer, error) {
|
||||
s.lock.Lock()
|
||||
diffOutput, ok := s.lockProtected.diffOutputs[index]
|
||||
s.lock.Unlock()
|
||||
if ok {
|
||||
// If we know a trusted DiffID value (e.g. from a BlobInfoCache), set it in diffOutput.
|
||||
// Typically, we compute a trusted DiffID value to authenticate the layer contents, see the detailed explanation
|
||||
// in PutBlobPartial. If the user has opted out of that, but we know a trusted DiffID value
|
||||
// (e.g. from a BlobInfoCache), set it in diffOutput.
|
||||
// That way it will be persisted in storage even if the cache is deleted; also
|
||||
// we can use the value below to avoid the untrustedUncompressedDigest logic (and notably
|
||||
// the costly commit delay until a manifest is available).
|
||||
s.lock.Lock()
|
||||
if d, ok := s.lockProtected.indexToDiffID[index]; ok {
|
||||
diffOutput.UncompressedDigest = d
|
||||
// we can use the value below to avoid the untrustedUncompressedDigest logic.
|
||||
if diffOutput.UncompressedDigest == "" && trusted.diffID != "" {
|
||||
diffOutput.UncompressedDigest = trusted.diffID
|
||||
}
|
||||
s.lock.Unlock()
|
||||
|
||||
var untrustedUncompressedDigest digest.Digest
|
||||
if diffOutput.UncompressedDigest == "" {
|
||||
d, err := s.untrustedLayerDiffID(index)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if d == "" {
|
||||
logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID)
|
||||
return nil, nil
|
||||
var diffIDUnknownErr untrustedLayerDiffIDUnknownError
|
||||
switch {
|
||||
case errors.Is(err, errUntrustedLayerDiffIDNotYetAvailable):
|
||||
logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID)
|
||||
return nil, nil
|
||||
case errors.As(err, &diffIDUnknownErr):
|
||||
// If untrustedLayerDiffIDUnknownError, the input image is schema1, has no TOC annotations,
|
||||
// so we should have !trusted.layerIdentifiedByTOC, i.e. we should have set
|
||||
// diffOutput.UncompressedDigest above in this function, at the very latest.
|
||||
//
|
||||
// Or, maybe, the input image is OCI, and has invalid/missing DiffID values in config. In that case
|
||||
// commitLayer should have already refused this image when dealing with the “view” ambiguity.
|
||||
return nil, fmt.Errorf("internal error: layer %d for blob %s was partially-pulled with unknown UncompressedDigest, but we don't have a DiffID in config",
|
||||
index, trusted.logString())
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
untrustedUncompressedDigest = d
|
||||
@ -965,19 +1143,17 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
|
||||
// then we need to read the desired contents from a layer.
|
||||
var filename string
|
||||
var gotFilename bool
|
||||
s.lock.Lock()
|
||||
trusted, ok := s.trustedLayerIdentityDataLocked(index, layerDigest)
|
||||
if ok && trusted.blobDigest != "" {
|
||||
if trusted.blobDigest != "" {
|
||||
s.lock.Lock()
|
||||
filename, gotFilename = s.lockProtected.filenames[trusted.blobDigest]
|
||||
}
|
||||
s.lock.Unlock()
|
||||
if !ok { // We have already determined newLayerID, so the data must have been available.
|
||||
return nil, fmt.Errorf("internal inconsistency: layer (%d, %q) not found", index, layerDigest)
|
||||
s.lock.Unlock()
|
||||
}
|
||||
var trustedOriginalDigest digest.Digest // For storage.LayerOptions
|
||||
var trustedOriginalSize *int64
|
||||
if gotFilename {
|
||||
// The code setting .filenames[trusted.blobDigest] is responsible for ensuring that the file contents match trusted.blobDigest.
|
||||
trustedOriginalDigest = trusted.blobDigest
|
||||
trustedOriginalSize = nil // It’s s.lockProtected.fileSizes[trusted.blobDigest], but we don’t hold the lock now, and the consumer can compute it at trivial cost.
|
||||
} else {
|
||||
// Try to find the layer with contents matching the data we use.
|
||||
var layer *storage.Layer // = nil
|
||||
@ -997,7 +1173,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
|
||||
}
|
||||
}
|
||||
if layer == nil {
|
||||
return nil, fmt.Errorf("layer for blob %q/%q/%q not found", trusted.blobDigest, trusted.tocDigest, trusted.diffID)
|
||||
return nil, fmt.Errorf("layer for blob %s not found", trusted.logString())
|
||||
}
|
||||
|
||||
// Read the layer's contents.
|
||||
@ -1007,7 +1183,7 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
|
||||
}
|
||||
diff, err2 := s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
|
||||
if err2 != nil {
|
||||
return nil, fmt.Errorf("reading layer %q for blob %q/%q/%q: %w", layer.ID, trusted.blobDigest, trusted.tocDigest, trusted.diffID, err2)
|
||||
return nil, fmt.Errorf("reading layer %q for blob %s: %w", layer.ID, trusted.logString(), err2)
|
||||
}
|
||||
// Copy the layer diff to a file. Diff() takes a lock that it holds
|
||||
// until the ReadCloser that it returns is closed, and PutLayer() wants
|
||||
@ -1032,22 +1208,36 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
|
||||
if trusted.diffID == "" && layer.UncompressedDigest != "" {
|
||||
trusted.diffID = layer.UncompressedDigest // This data might have been unavailable in tryReusingBlobAsPending, and is only known now.
|
||||
}
|
||||
// The stream we have is uncompressed, and it matches trusted.diffID (if known).
|
||||
|
||||
// Set the layer’s CompressedDigest/CompressedSize to relevant values if known, to allow more layer reuse.
|
||||
// But we don’t want to just use the size from the manifest if we never saw the compressed blob,
|
||||
// so that we don’t propagate mistakes / attacks.
|
||||
//
|
||||
// FIXME? trustedOriginalDigest could be set to trusted.blobDigest if known, to allow more layer reuse.
|
||||
// But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created
|
||||
// layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream).
|
||||
//
|
||||
// We can legitimately set storage.LayerOptions.OriginalDigest to "",
|
||||
// but that would just result in PutLayer computing the digest of the input stream == trusted.diffID.
|
||||
// So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
|
||||
trustedOriginalDigest = trusted.diffID
|
||||
// s.lockProtected.fileSizes[trusted.blobDigest] is not set, otherwise we would have found gotFilename.
|
||||
// So, check if the layer we found contains that metadata. (If that layer continues to exist, there’s no benefit
|
||||
// to us propagating the metadata; but that layer could be removed, and in that case propagating the metadata to
|
||||
// this new layer copy can help.)
|
||||
if trusted.blobDigest != "" && layer.CompressedDigest == trusted.blobDigest && layer.CompressedSize > 0 {
|
||||
trustedOriginalDigest = trusted.blobDigest
|
||||
sizeCopy := layer.CompressedSize
|
||||
trustedOriginalSize = &sizeCopy
|
||||
} else {
|
||||
// The stream we have is uncompressed, and it matches trusted.diffID (if known).
|
||||
//
|
||||
// We can legitimately set storage.LayerOptions.OriginalDigest to "",
|
||||
// but that would just result in PutLayer computing the digest of the input stream == trusted.diffID.
|
||||
// So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
|
||||
trustedOriginalDigest = trusted.diffID
|
||||
trustedOriginalSize = nil // Probably layer.UncompressedSize, but the consumer can compute it at trivial cost.
|
||||
}
|
||||
|
||||
// Allow using the already-collected layer contents without extracting the layer again.
|
||||
//
|
||||
// This only matches against the uncompressed digest.
|
||||
// We don’t have the original compressed data here to trivially set filenames[layerDigest].
|
||||
// In particular we can’t achieve the correct Layer.CompressedSize value with the current c/storage API.
|
||||
// If we have trustedOriginalDigest == trusted.blobDigest, we could arrange to reuse the
|
||||
// same uncompressed stream for future calls of createNewLayer; but for the non-layer blobs (primarily the config),
|
||||
// we assume that the file at filenames[someDigest] matches someDigest _exactly_; we would need to differentiate
|
||||
// between “original files” and “possibly uncompressed files”.
|
||||
// Within-image layer reuse is probably very rare, for now we prefer to avoid that complexity.
|
||||
if trusted.diffID != "" {
|
||||
s.lock.Lock()
|
||||
@ -1067,55 +1257,128 @@ func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.D
|
||||
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
|
||||
layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{
|
||||
OriginalDigest: trustedOriginalDigest,
|
||||
OriginalSize: trustedOriginalSize, // nil in many cases
|
||||
// This might be "" if trusted.layerIdentifiedByTOC; in that case PutLayer will compute the value from the stream.
|
||||
UncompressedDigest: trusted.diffID,
|
||||
}, file)
|
||||
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
|
||||
return nil, fmt.Errorf("adding layer with blob %q/%q/%q: %w", trusted.blobDigest, trusted.tocDigest, trusted.diffID, err)
|
||||
return nil, fmt.Errorf("adding layer with blob %s: %w", trusted.logString(), err)
|
||||
}
|
||||
return layer, nil
|
||||
}
|
||||
|
||||
// untrustedLayerDiffID returns a DiffID value for layerIndex from the image’s config.
|
||||
// If the value is not yet available (but it can be available after s.manifest is set), it returns ("", nil).
|
||||
// WARNING: We don’t validate the DiffID value against the layer contents; it must not be used for any deduplication.
|
||||
func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) {
|
||||
// At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob, and
|
||||
// nothing is writing to s.manifest yet, or PutManifest has been called and s.manifest != nil.
|
||||
// Either way this function does not need the protection of s.lock.
|
||||
if s.manifest == nil {
|
||||
return "", nil
|
||||
// uncommittedImageSource allows accessing an image’s metadata (not layers) before it has been committed,
|
||||
// to allow using image.FromUnparsedImage.
|
||||
type uncommittedImageSource struct {
|
||||
srcImpl.Compat
|
||||
srcImpl.PropertyMethodsInitialize
|
||||
srcImpl.NoSignatures
|
||||
srcImpl.DoesNotAffectLayerInfosForCopy
|
||||
srcStubs.NoGetBlobAtInitialize
|
||||
|
||||
d *storageImageDestination
|
||||
}
|
||||
|
||||
func newUncommittedImageSource(d *storageImageDestination) *uncommittedImageSource {
|
||||
s := &uncommittedImageSource{
|
||||
PropertyMethodsInitialize: srcImpl.PropertyMethods(srcImpl.Properties{
|
||||
HasThreadSafeGetBlob: true,
|
||||
}),
|
||||
NoGetBlobAtInitialize: srcStubs.NoGetBlobAt(d.Reference()),
|
||||
|
||||
d: d,
|
||||
}
|
||||
s.Compat = srcImpl.AddCompat(s)
|
||||
return s
|
||||
}
|
||||
|
||||
func (u *uncommittedImageSource) Reference() types.ImageReference {
|
||||
return u.d.Reference()
|
||||
}
|
||||
|
||||
func (u *uncommittedImageSource) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *uncommittedImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
|
||||
return u.d.manifest, u.d.manifestMIMEType, nil
|
||||
}
|
||||
|
||||
func (u *uncommittedImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
|
||||
blob, err := u.d.getConfigBlob(info)
|
||||
if err != nil {
|
||||
return nil, -1, err
|
||||
}
|
||||
return io.NopCloser(bytes.NewReader(blob)), int64(len(blob)), nil
|
||||
}
|
||||
|
||||
// errUntrustedLayerDiffIDNotYetAvailable is returned by untrustedLayerDiffID
|
||||
// if the value is not yet available (but it can be available after s.manifest is set).
|
||||
// This should only happen for external callers of the transport, not for c/image/copy.
|
||||
//
|
||||
// Callers of untrustedLayerDiffID before PutManifest must handle this error specially;
|
||||
// callers after PutManifest can use the default, reporting an internal error.
|
||||
var errUntrustedLayerDiffIDNotYetAvailable = errors.New("internal error: untrustedLayerDiffID has no value available and fallback was not implemented")
|
||||
|
||||
// untrustedLayerDiffIDUnknownError is returned by untrustedLayerDiffID
|
||||
// if the image’s format does not provide DiffIDs.
|
||||
type untrustedLayerDiffIDUnknownError struct {
|
||||
layerIndex int
|
||||
}
|
||||
|
||||
func (e untrustedLayerDiffIDUnknownError) Error() string {
|
||||
return fmt.Sprintf("DiffID value for layer %d is unknown or explicitly empty", e.layerIndex)
|
||||
}
|
||||
|
||||
// untrustedLayerDiffID returns a DiffID value for layerIndex from the image’s config.
|
||||
// It may return two special errors, errUntrustedLayerDiffIDNotYetAvailable or untrustedLayerDiffIDUnknownError.
|
||||
//
|
||||
// WARNING: This function does not even validate that the returned digest has a valid format.
|
||||
// WARNING: We don’t _always_ validate this DiffID value against the layer contents; it must not be used for any deduplication.
|
||||
func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) {
|
||||
// At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob,
|
||||
// nothing is writing to s.manifest yet, and s.untrustedDiffIDValues might have been set
|
||||
// by NoteOriginalOCIConfig and are not being updated any more;
|
||||
// or PutManifest has been called and s.manifest != nil.
|
||||
// Either way this function does not need the protection of s.lock.
|
||||
|
||||
if s.untrustedDiffIDValues == nil {
|
||||
mt := manifest.GuessMIMEType(s.manifest)
|
||||
if mt != imgspecv1.MediaTypeImageManifest {
|
||||
// We could, in principle, build an ImageSource, support arbitrary image formats using image.FromUnparsedImage,
|
||||
// and then use types.Image.OCIConfig so that we can parse the image.
|
||||
//
|
||||
// In practice, this should, right now, only matter for pulls of OCI images (this code path implies that a layer has TOC annotations),
// while converting to a non-OCI format, using a manual (skopeo copy) or something similar, not (podman pull).
|
||||
// So it is not implemented yet.
|
||||
return "", fmt.Errorf("determining DiffID for manifest type %q is not yet supported", mt)
|
||||
}
|
||||
man, err := manifest.FromBlob(s.manifest, mt)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("parsing manifest: %w", err)
|
||||
// Typically, we expect untrustedDiffIDValues to be set by the generic copy code
|
||||
// via NoteOriginalOCIConfig; this is a compatibility fallback for external callers
|
||||
// of the public types.ImageDestination.
|
||||
if s.manifest == nil {
|
||||
return "", errUntrustedLayerDiffIDNotYetAvailable
|
||||
}
|
||||
|
||||
cb, err := s.getConfigBlob(man.ConfigInfo())
|
||||
ctx := context.Background() // This is all happening in memory, no need to worry about cancellation.
|
||||
unparsed := image.UnparsedInstance(newUncommittedImageSource(s), nil)
|
||||
sourced, err := image.FromUnparsedImage(ctx, nil, unparsed)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return "", fmt.Errorf("parsing image to be committed: %w", err)
|
||||
}
|
||||
configOCI, err := sourced.OCIConfig(ctx)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("obtaining config of image to be committed: %w", err)
|
||||
}
|
||||
|
||||
// retrieve the expected uncompressed digest from the config blob.
|
||||
configOCI := &imgspecv1.Image{}
|
||||
if err := json.Unmarshal(cb, configOCI); err != nil {
|
||||
return "", err
|
||||
}
|
||||
s.untrustedDiffIDValues = slices.Clone(configOCI.RootFS.DiffIDs)
|
||||
if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory…
|
||||
s.untrustedDiffIDValues = []digest.Digest{}
|
||||
s.setUntrustedDiffIDValuesFromOCIConfig(configOCI)
|
||||
}
|
||||
|
||||
// Let entirely empty / missing diffIDs through; but if the array does exist, expect it to contain an entry for every layer,
|
||||
// and fail hard on missing entries. This tries to account for completely naive image producers who just don’t fill DiffID,
|
||||
// while still detecting incorrectly-built / confused images.
|
||||
//
|
||||
// schema1 images don’t have DiffID values in the config.
|
||||
// Our schema1.OCIConfig code produces non-empty DiffID arrays of empty values, so treat arrays of all-empty
|
||||
// values as “DiffID unknown”.
|
||||
// For schema 1, it is important to exit here, before the layerIndex >= len(s.untrustedDiffIDValues)
|
||||
// check, because the format conversion from schema1 to OCI used to compute untrustedDiffIDValues
|
||||
// changes the number of layers (drops items with Schema1V1Compatibility.ThrowAway).
|
||||
if !slices.ContainsFunc(s.untrustedDiffIDValues, func(d digest.Digest) bool {
|
||||
return d != ""
|
||||
}) {
|
||||
return "", untrustedLayerDiffIDUnknownError{
|
||||
layerIndex: layerIndex,
|
||||
}
|
||||
}
|
||||
if layerIndex >= len(s.untrustedDiffIDValues) {
|
||||
@ -1124,6 +1387,15 @@ func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.D
|
||||
return s.untrustedDiffIDValues[layerIndex], nil
|
||||
}
|
||||
|
||||
// setUntrustedDiffIDValuesFromOCIConfig updates s.untrustedDiffIDValues from config.
|
||||
// The caller must ensure s.lock does not need to be held.
|
||||
func (s *storageImageDestination) setUntrustedDiffIDValuesFromOCIConfig(config *imgspecv1.Image) {
|
||||
s.untrustedDiffIDValues = slices.Clone(config.RootFS.DiffIDs)
|
||||
if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory…
|
||||
s.untrustedDiffIDValues = []digest.Digest{}
|
||||
}
|
||||
}
|
||||
|
||||
// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted.
|
||||
// WARNING: This does not have any transactional semantics:
|
||||
// - Uploaded data MAY be visible to others before CommitWithOptions() is called
|
||||
@ -1131,7 +1403,7 @@ func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.D
|
||||
func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error {
|
||||
// This function is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock.
|
||||
|
||||
if len(s.manifest) == 0 {
|
||||
if s.manifest == nil {
|
||||
return errors.New("Internal error: storageImageDestination.CommitWithOptions() called without PutManifest()")
|
||||
}
|
||||
toplevelManifest, _, err := options.UnparsedToplevel.Manifest(ctx)
|
||||
@ -1159,7 +1431,7 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
|
||||
}
|
||||
}
|
||||
// Find the list of layer blobs.
|
||||
man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
|
||||
man, err := manifest.FromBlob(s.manifest, s.manifestMIMEType)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing manifest: %w", err)
|
||||
}
|
||||
@ -1193,29 +1465,21 @@ func (s *storageImageDestination) CommitWithOptions(ctx context.Context, options
|
||||
imgOptions.CreationDate = *inspect.Created
|
||||
}
|
||||
|
||||
// Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so
|
||||
// we just need to screen out the ones that are actually layers to get the list of non-layers.
|
||||
dataBlobs := set.New[digest.Digest]()
|
||||
for blob := range s.lockProtected.filenames {
|
||||
dataBlobs.Add(blob)
|
||||
}
|
||||
for _, layerBlob := range layerBlobs {
|
||||
dataBlobs.Delete(layerBlob.Digest)
|
||||
}
|
||||
for _, blob := range dataBlobs.Values() {
|
||||
v, err := os.ReadFile(s.lockProtected.filenames[blob])
|
||||
// Set up to save the config as a data item. Since we only share layers, the config should be in a file.
|
||||
if s.lockProtected.configDigest != "" {
|
||||
v, err := os.ReadFile(s.lockProtected.filenames[s.lockProtected.configDigest])
|
||||
if err != nil {
|
||||
return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
|
||||
return fmt.Errorf("copying config blob %q to image: %w", s.lockProtected.configDigest, err)
|
||||
}
|
||||
imgOptions.BigData = append(imgOptions.BigData, storage.ImageBigDataOption{
|
||||
Key: blob.String(),
|
||||
Key: s.lockProtected.configDigest.String(),
|
||||
Data: v,
|
||||
Digest: digest.Canonical.FromBytes(v),
|
||||
})
|
||||
}
|
||||
// Set up to save the options.UnparsedToplevel's manifest if it differs from
|
||||
// the per-platform one, which is saved below.
|
||||
if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) {
|
||||
if !bytes.Equal(toplevelManifest, s.manifest) {
|
||||
manifestDigest, err := manifest.Digest(toplevelManifest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("digesting top-level manifest: %w", err)
|
||||
@ -1370,6 +1634,10 @@ func (s *storageImageDestination) PutManifest(ctx context.Context, manifestBlob
|
||||
return err
|
||||
}
|
||||
s.manifest = bytes.Clone(manifestBlob)
|
||||
if s.manifest == nil { // Make sure PutManifest can never succeed with s.manifest == nil
|
||||
s.manifest = []byte{}
|
||||
}
|
||||
s.manifestMIMEType = manifest.GuessMIMEType(s.manifest)
|
||||
s.manifestDigest = digest
|
||||
return nil
|
||||
}
|
||||
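The nil check after bytes.Clone above matters because Clone preserves nil-ness; here is a rough standalone sketch of the behavior the guard relies on (not skopeo code).

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// bytes.Clone(nil) returns nil, so a nil manifestBlob would leave s.manifest nil
	// without the explicit normalization to an empty slice.
	var blob []byte
	fmt.Println(bytes.Clone(blob) == nil) // true
	fmt.Println(len(bytes.Clone(blob)))   // 0
}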
@ -1392,7 +1660,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s
|
||||
if instanceDigest == nil {
|
||||
s.signatures = sigblob
|
||||
s.metadata.SignatureSizes = sizes
|
||||
if len(s.manifest) > 0 {
|
||||
if s.manifest != nil {
|
||||
manifestDigest := s.manifestDigest
|
||||
instanceDigest = &manifestDigest
|
||||
}
|
||||
|
4
vendor/github.com/containers/image/v5/storage/storage_reference.go
generated
vendored
@ -153,7 +153,9 @@ func (s *storageReference) resolveImage(sys *types.SystemContext) (*storage.Imag
|
||||
}
|
||||
if s.id == "" {
|
||||
logrus.Debugf("reference %q does not resolve to an image ID", s.StringWithinTransport())
|
||||
return nil, fmt.Errorf("reference %q does not resolve to an image ID: %w", s.StringWithinTransport(), ErrNoSuchImage)
|
||||
// %.0w makes the error visible to errors.Unwrap() without including any text.
|
||||
// ErrNoSuchImage ultimately is “identifier is not an image”, which is not helpful for identifying the root cause.
|
||||
return nil, fmt.Errorf("reference %q does not resolve to an image ID%.0w", s.StringWithinTransport(), ErrNoSuchImage)
|
||||
}
|
||||
if loadedImage == nil {
|
||||
img, err := s.transport.store.Image(s.id)
|
||||
|
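The %.0w trick referenced in the comment above can be checked in isolation; this sketch uses a stand-in sentinel error rather than the real storage.ErrNoSuchImage.

package main

import (
	"errors"
	"fmt"
)

// errNoSuchImage is a stand-in for the storage package's sentinel error.
var errNoSuchImage = errors.New("identifier is not an image")

func main() {
	// "%.0w" wraps the error (so errors.Is/errors.Unwrap can see it) while the
	// zero precision keeps its text out of the formatted message.
	err := fmt.Errorf("reference %q does not resolve to an image ID%.0w", "example", errNoSuchImage)
	fmt.Println(err)                            // reference "example" does not resolve to an image ID
	fmt.Println(errors.Is(err, errNoSuchImage)) // true
}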
22
vendor/github.com/containers/image/v5/storage/storage_src.go
generated
vendored
@ -35,13 +35,14 @@ type storageImageSource struct {
|
||||
impl.PropertyMethodsInitialize
|
||||
stubs.NoGetBlobAtInitialize
|
||||
|
||||
imageRef storageReference
|
||||
image *storage.Image
|
||||
systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
|
||||
metadata storageImageMetadata
|
||||
cachedManifest []byte // A cached copy of the manifest, if already known, or nil
|
||||
getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
|
||||
getBlobMutexProtected getBlobMutexProtected
|
||||
imageRef storageReference
|
||||
image *storage.Image
|
||||
systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
|
||||
metadata storageImageMetadata
|
||||
cachedManifest []byte // A cached copy of the manifest, if already known, or nil
|
||||
cachedManifestMIMEType string // Valid if cachedManifest != nil
|
||||
getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
|
||||
getBlobMutexProtected getBlobMutexProtected
|
||||
}
|
||||
|
||||
// getBlobMutexProtected contains storageImageSource data protected by getBlobMutex.
|
||||
@ -247,7 +248,7 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
|
||||
}
|
||||
return blob, manifest.GuessMIMEType(blob), err
|
||||
}
|
||||
if len(s.cachedManifest) == 0 {
|
||||
if s.cachedManifest == nil {
|
||||
// The manifest is stored as a big data item.
|
||||
// Prefer the manifest corresponding to the user-specified digest, if available.
|
||||
if s.imageRef.named != nil {
|
||||
@ -267,15 +268,16 @@ func (s *storageImageSource) GetManifest(ctx context.Context, instanceDigest *di
|
||||
}
|
||||
// If the user did not specify a digest, or this is an old image stored before manifestBigDataKey was introduced, use the default manifest.
|
||||
// Note that the manifest may not match the expected digest, and that is likely to fail eventually, e.g. in c/image/image/UnparsedImage.Manifest().
|
||||
if len(s.cachedManifest) == 0 {
|
||||
if s.cachedManifest == nil {
|
||||
cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.image.ID, storage.ImageDigestBigDataKey)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
s.cachedManifest = cachedBlob
|
||||
}
|
||||
s.cachedManifestMIMEType = manifest.GuessMIMEType(s.cachedManifest)
|
||||
}
|
||||
return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err
|
||||
return s.cachedManifest, s.cachedManifestMIMEType, err
|
||||
}
|
||||
|
||||
// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of
|
||||
|
4
vendor/github.com/containers/image/v5/version/version.go
generated
vendored
@ -6,9 +6,9 @@ const (
|
||||
// VersionMajor is for API-incompatible changes
VersionMajor = 5
// VersionMinor is for functionality added in a backwards-compatible manner
|
||||
VersionMinor = 33
|
||||
VersionMinor = 34
|
||||
// VersionPatch is for backwards-compatible bug fixes
|
||||
VersionPatch = 1
|
||||
VersionPatch = 0
|
||||
|
||||
// VersionDev indicates development branch. Releases will be empty string.
|
||||
VersionDev = ""
|
||||
|
50
vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go
generated
vendored
@ -154,40 +154,65 @@ var supportedAlgorithms = map[string]bool{
|
||||
EdDSA: true,
|
||||
}
|
||||
|
||||
// ProviderConfig allows creating providers when discovery isn't supported. It's
|
||||
// generally easier to use NewProvider directly.
|
||||
// ProviderConfig allows direct creation of a [Provider] from metadata
|
||||
// configuration. This is intended for interop with providers that don't support
|
||||
// discovery, or host the JSON discovery document at an off-spec path.
|
||||
//
|
||||
// The ProviderConfig struct specifies JSON struct tags to support document
|
||||
// parsing.
|
||||
//
|
||||
// // Directly fetch the metadata document.
|
||||
// resp, err := http.Get("https://login.example.com/custom-metadata-path")
|
||||
// if err != nil {
|
||||
// // ...
|
||||
// }
|
||||
// defer resp.Body.Close()
|
||||
//
|
||||
// // Parse config from JSON metadata.
|
||||
// config := &oidc.ProviderConfig{}
|
||||
// if err := json.NewDecoder(resp.Body).Decode(config); err != nil {
|
||||
// // ...
|
||||
// }
|
||||
// p := config.NewProvider(context.Background())
|
||||
//
|
||||
// For providers that implement discovery, use [NewProvider] instead.
|
||||
//
|
||||
// See: https://openid.net/specs/openid-connect-discovery-1_0.html
|
||||
type ProviderConfig struct {
|
||||
// IssuerURL is the identity of the provider, and the string it uses to sign
|
||||
// ID tokens with. For example "https://accounts.google.com". This value MUST
|
||||
// match ID tokens exactly.
|
||||
IssuerURL string
|
||||
IssuerURL string `json:"issuer"`
|
||||
// AuthURL is the endpoint used by the provider to support the OAuth 2.0
|
||||
// authorization endpoint.
|
||||
AuthURL string
|
||||
AuthURL string `json:"authorization_endpoint"`
|
||||
// TokenURL is the endpoint used by the provider to support the OAuth 2.0
|
||||
// token endpoint.
|
||||
TokenURL string
|
||||
TokenURL string `json:"token_endpoint"`
|
||||
// DeviceAuthURL is the endpoint used by the provider to support the OAuth 2.0
|
||||
// device authorization endpoint.
|
||||
DeviceAuthURL string
|
||||
DeviceAuthURL string `json:"device_authorization_endpoint"`
|
||||
// UserInfoURL is the endpoint used by the provider to support the OpenID
|
||||
// Connect UserInfo flow.
|
||||
//
|
||||
// https://openid.net/specs/openid-connect-core-1_0.html#UserInfo
|
||||
UserInfoURL string
|
||||
UserInfoURL string `json:"userinfo_endpoint"`
|
||||
// JWKSURL is the endpoint used by the provider to advertise public keys to
|
||||
// verify issued ID tokens. This endpoint is polled as new keys are made
|
||||
// available.
|
||||
JWKSURL string
|
||||
JWKSURL string `json:"jwks_uri"`
|
||||
|
||||
// Algorithms, if provided, indicate a list of JWT algorithms allowed to sign
|
||||
// ID tokens. If not provided, this defaults to the algorithms advertised by
|
||||
// the JWK endpoint, then the set of algorithms supported by this package.
|
||||
Algorithms []string
|
||||
Algorithms []string `json:"id_token_signing_alg_values_supported"`
|
||||
}
|
||||
|
||||
// NewProvider initializes a provider from a set of endpoints, rather than
|
||||
// through discovery.
|
||||
//
|
||||
// The provided context is only used for [http.Client] configuration through
|
||||
// [ClientContext], not cancelation.
|
||||
func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider {
|
||||
return &Provider{
|
||||
issuer: p.IssuerURL,
|
||||
@ -202,9 +227,14 @@ func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider {
|
||||
}
|
||||
|
||||
// NewProvider uses the OpenID Connect discovery mechanism to construct a Provider.
|
||||
//
|
||||
// The issuer is the URL identifier for the service. For example: "https://accounts.google.com"
|
||||
// or "https://login.salesforce.com".
|
||||
//
|
||||
// OpenID Connect providers that don't implement discovery or host the discovery
|
||||
// document at a non-spec complaint path (such as requiring a URL parameter),
|
||||
// should use [ProviderConfig] instead.
|
||||
//
|
||||
// See: https://openid.net/specs/openid-connect-discovery-1_0.html
|
||||
func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
|
||||
wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration"
|
||||
req, err := http.NewRequest("GET", wellKnown, nil)
|
||||
|
27
vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go
generated
vendored
@ -1,27 +0,0 @@
|
||||
package challenge
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// FROM: https://golang.org/src/net/http/http.go
|
||||
// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
|
||||
// return true if the string includes a port.
|
||||
func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
|
||||
|
||||
// FROM: http://golang.org/src/net/http/transport.go
|
||||
var portMap = map[string]string{
|
||||
"http": "80",
|
||||
"https": "443",
|
||||
}
|
||||
|
||||
// canonicalAddr returns url.Host but always with a ":port" suffix
|
||||
// FROM: http://golang.org/src/net/http/transport.go
|
||||
func canonicalAddr(url *url.URL) string {
|
||||
addr := url.Host
|
||||
if !hasPort(addr) {
|
||||
return addr + ":" + portMap[url.Scheme]
|
||||
}
|
||||
return addr
|
||||
}
|
237
vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go
generated
vendored
@ -1,237 +0,0 @@
|
||||
package challenge
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Challenge carries information from a WWW-Authenticate response header.
|
||||
// See RFC 2617.
|
||||
type Challenge struct {
|
||||
// Scheme is the auth-scheme according to RFC 2617
|
||||
Scheme string
|
||||
|
||||
// Parameters are the auth-params according to RFC 2617
|
||||
Parameters map[string]string
|
||||
}
|
||||
|
||||
// Manager manages the challenges for endpoints.
|
||||
// The challenges are pulled out of HTTP responses. Only
|
||||
// responses which expect challenges should be added to
|
||||
// the manager, since a non-unauthorized request will be
|
||||
// viewed as not requiring challenges.
|
||||
type Manager interface {
|
||||
// GetChallenges returns the challenges for the given
|
||||
// endpoint URL.
|
||||
GetChallenges(endpoint url.URL) ([]Challenge, error)
|
||||
|
||||
// AddResponse adds the response to the challenge
|
||||
// manager. The challenges will be parsed out of
|
||||
// the WWW-Authenicate headers and added to the
|
||||
// URL which was produced the response. If the
|
||||
// response was authorized, any challenges for the
|
||||
// endpoint will be cleared.
|
||||
AddResponse(resp *http.Response) error
|
||||
}
|
||||
|
||||
// NewSimpleManager returns an instance of
|
||||
// Manager which only maps endpoints to challenges
|
||||
// based on the responses which have been added the
|
||||
// manager. The simple manager will make no attempt to
|
||||
// perform requests on the endpoints or cache the responses
|
||||
// to a backend.
|
||||
func NewSimpleManager() Manager {
|
||||
return &simpleManager{
|
||||
Challenges: make(map[string][]Challenge),
|
||||
}
|
||||
}
|
||||
|
||||
type simpleManager struct {
|
||||
sync.RWMutex
|
||||
Challenges map[string][]Challenge
|
||||
}
|
||||
|
||||
func normalizeURL(endpoint *url.URL) {
|
||||
endpoint.Host = strings.ToLower(endpoint.Host)
|
||||
endpoint.Host = canonicalAddr(endpoint)
|
||||
}
|
||||
|
||||
func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
|
||||
normalizeURL(&endpoint)
|
||||
|
||||
m.RLock()
|
||||
defer m.RUnlock()
|
||||
challenges := m.Challenges[endpoint.String()]
|
||||
return challenges, nil
|
||||
}
|
||||
|
||||
func (m *simpleManager) AddResponse(resp *http.Response) error {
|
||||
challenges := ResponseChallenges(resp)
|
||||
if resp.Request == nil {
|
||||
return fmt.Errorf("missing request reference")
|
||||
}
|
||||
urlCopy := url.URL{
|
||||
Path: resp.Request.URL.Path,
|
||||
Host: resp.Request.URL.Host,
|
||||
Scheme: resp.Request.URL.Scheme,
|
||||
}
|
||||
normalizeURL(&urlCopy)
|
||||
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
m.Challenges[urlCopy.String()] = challenges
|
||||
return nil
|
||||
}
|
||||
|
||||
// Octet types from RFC 2616.
|
||||
type octetType byte
|
||||
|
||||
var octetTypes [256]octetType
|
||||
|
||||
const (
|
||||
isToken octetType = 1 << iota
|
||||
isSpace
|
||||
)
|
||||
|
||||
func init() {
|
||||
// OCTET = <any 8-bit sequence of data>
|
||||
// CHAR = <any US-ASCII character (octets 0 - 127)>
|
||||
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
|
||||
// CR = <US-ASCII CR, carriage return (13)>
|
||||
// LF = <US-ASCII LF, linefeed (10)>
|
||||
// SP = <US-ASCII SP, space (32)>
|
||||
// HT = <US-ASCII HT, horizontal-tab (9)>
|
||||
// <"> = <US-ASCII double-quote mark (34)>
|
||||
// CRLF = CR LF
|
||||
// LWS = [CRLF] 1*( SP | HT )
|
||||
// TEXT = <any OCTET except CTLs, but including LWS>
|
||||
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
|
||||
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
|
||||
// token = 1*<any CHAR except CTLs or separators>
|
||||
// qdtext = <any TEXT except <">>
|
||||
|
||||
for c := 0; c < 256; c++ {
|
||||
var t octetType
|
||||
isCtl := c <= 31 || c == 127
|
||||
isChar := 0 <= c && c <= 127
|
||||
isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
|
||||
if strings.ContainsRune(" \t\r\n", rune(c)) {
|
||||
t |= isSpace
|
||||
}
|
||||
if isChar && !isCtl && !isSeparator {
|
||||
t |= isToken
|
||||
}
|
||||
octetTypes[c] = t
|
||||
}
|
||||
}
|
||||
|
||||
// ResponseChallenges returns a list of authorization challenges
|
||||
// for the given http Response. Challenges are only checked if
|
||||
// the response status code was a 401.
|
||||
func ResponseChallenges(resp *http.Response) []Challenge {
|
||||
if resp.StatusCode == http.StatusUnauthorized {
|
||||
// Parse the WWW-Authenticate Header and store the challenges
|
||||
// on this endpoint object.
|
||||
return parseAuthHeader(resp.Header)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseAuthHeader(header http.Header) []Challenge {
|
||||
challenges := []Challenge{}
|
||||
for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
|
||||
v, p := parseValueAndParams(h)
|
||||
if v != "" {
|
||||
challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
|
||||
}
|
||||
}
|
||||
return challenges
|
||||
}
|
||||
|
||||
func parseValueAndParams(header string) (value string, params map[string]string) {
|
||||
params = make(map[string]string)
|
||||
value, s := expectToken(header)
|
||||
if value == "" {
|
||||
return
|
||||
}
|
||||
value = strings.ToLower(value)
|
||||
s = "," + skipSpace(s)
|
||||
for strings.HasPrefix(s, ",") {
|
||||
var pkey string
|
||||
pkey, s = expectToken(skipSpace(s[1:]))
|
||||
if pkey == "" {
|
||||
return
|
||||
}
|
||||
if !strings.HasPrefix(s, "=") {
|
||||
return
|
||||
}
|
||||
var pvalue string
|
||||
pvalue, s = expectTokenOrQuoted(s[1:])
|
||||
if pvalue == "" {
|
||||
return
|
||||
}
|
||||
pkey = strings.ToLower(pkey)
|
||||
params[pkey] = pvalue
|
||||
s = skipSpace(s)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func skipSpace(s string) (rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if octetTypes[s[i]]&isSpace == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[i:]
|
||||
}
|
||||
|
||||
func expectToken(s string) (token, rest string) {
|
||||
i := 0
|
||||
for ; i < len(s); i++ {
|
||||
if octetTypes[s[i]]&isToken == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return s[:i], s[i:]
|
||||
}
|
||||
|
||||
func expectTokenOrQuoted(s string) (value string, rest string) {
|
||||
if !strings.HasPrefix(s, "\"") {
|
||||
return expectToken(s)
|
||||
}
|
||||
s = s[1:]
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch s[i] {
|
||||
case '"':
|
||||
return s[:i], s[i+1:]
|
||||
case '\\':
|
||||
p := make([]byte, len(s)-1)
|
||||
j := copy(p, s[:i])
|
||||
escape := true
|
||||
for i = i + 1; i < len(s); i++ {
|
||||
b := s[i]
|
||||
switch {
|
||||
case escape:
|
||||
escape = false
|
||||
p[j] = b
|
||||
j++
|
||||
case b == '\\':
|
||||
escape = true
|
||||
case b == '"':
|
||||
return string(p[:j]), s[i+1:]
|
||||
default:
|
||||
p[j] = b
|
||||
j++
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
||||
}
|
||||
return "", ""
|
||||
}
|
32
vendor/github.com/docker/docker/api/swagger.yaml
generated
vendored
@ -1195,6 +1195,7 @@ definitions:
|
||||
- "default"
|
||||
- "process"
|
||||
- "hyperv"
|
||||
- ""
|
||||
MaskedPaths:
|
||||
type: "array"
|
||||
description: |
|
||||
@ -4180,6 +4181,7 @@ definitions:
|
||||
- "default"
|
||||
- "process"
|
||||
- "hyperv"
|
||||
- ""
|
||||
Init:
|
||||
description: |
|
||||
Run an init inside the container that forwards signals and reaps
|
||||
@ -5750,6 +5752,7 @@ definitions:
|
||||
- "default"
|
||||
- "hyperv"
|
||||
- "process"
|
||||
- ""
|
||||
InitBinary:
|
||||
description: |
|
||||
Name and, optional, path of the `docker-init` binary.
|
||||
@ -5820,8 +5823,6 @@ definitions:
|
||||
type: "string"
|
||||
example:
|
||||
- "WARNING: No memory limit support"
|
||||
- "WARNING: bridge-nf-call-iptables is disabled"
|
||||
- "WARNING: bridge-nf-call-ip6tables is disabled"
|
||||
CDISpecDirs:
|
||||
description: |
|
||||
List of directories where (Container Device Interface) CDI
|
||||
@ -7876,10 +7877,12 @@ paths:
|
||||
type: "string"
|
||||
- name: "h"
|
||||
in: "query"
|
||||
required: true
|
||||
description: "Height of the TTY session in characters"
|
||||
type: "integer"
|
||||
- name: "w"
|
||||
in: "query"
|
||||
required: true
|
||||
description: "Width of the TTY session in characters"
|
||||
type: "integer"
|
||||
tags: ["Container"]
|
||||
@ -9244,6 +9247,19 @@ paths:
|
||||
all tags of the given image that are present in the local image store
|
||||
are pushed.
|
||||
type: "string"
|
||||
- name: "platform"
|
||||
type: "string"
|
||||
in: "query"
|
||||
description: |
|
||||
JSON-encoded OCI platform to select the platform-variant to push.
|
||||
If not provided, all available variants will attempt to be pushed.
|
||||
|
||||
If the daemon provides a multi-platform image store, this selects
|
||||
the platform-variant to push to the registry. If the image is
|
||||
a single-platform image, or if the multi-platform image does not
|
||||
provide a variant matching the given platform, an error is returned.
|
||||
|
||||
Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}`
|
||||
- name: "X-Registry-Auth"
|
||||
in: "header"
|
||||
description: |
|
||||
@ -9253,11 +9269,6 @@ paths:
|
||||
details.
|
||||
type: "string"
|
||||
required: true
|
||||
- name: "platform"
|
||||
in: "query"
|
||||
description: "Select a platform-specific manifest to be pushed. OCI platform (JSON encoded)"
|
||||
type: "string"
|
||||
x-nullable: true
|
||||
tags: ["Image"]
|
||||
/images/{name}/tag:
|
||||
post:
|
||||
@ -9553,7 +9564,7 @@ paths:
|
||||
type: "string"
|
||||
example: "OK"
|
||||
headers:
|
||||
API-Version:
|
||||
Api-Version:
|
||||
type: "string"
|
||||
description: "Max API Version the server supports"
|
||||
Builder-Version:
|
||||
@ -9609,7 +9620,7 @@ paths:
|
||||
type: "string"
|
||||
example: "(empty)"
|
||||
headers:
|
||||
API-Version:
|
||||
Api-Version:
|
||||
type: "string"
|
||||
description: "Max API Version the server supports"
|
||||
Builder-Version:
|
||||
@ -10203,10 +10214,12 @@ paths:
|
||||
type: "string"
|
||||
- name: "h"
|
||||
in: "query"
|
||||
required: true
|
||||
description: "Height of the TTY session in characters"
|
||||
type: "integer"
|
||||
- name: "w"
|
||||
in: "query"
|
||||
required: true
|
||||
description: "Width of the TTY session in characters"
|
||||
type: "integer"
|
||||
tags: ["Exec"]
|
||||
@ -11622,6 +11635,7 @@ paths:
|
||||
example:
|
||||
ListenAddr: "0.0.0.0:2377"
|
||||
AdvertiseAddr: "192.168.1.1:2377"
|
||||
DataPathAddr: "192.168.1.1"
|
||||
RemoteAddrs:
|
||||
- "node1:2377"
|
||||
JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2"
|
||||
|
2
vendor/github.com/docker/docker/api/types/container/hostconfig.go
generated
vendored
@ -10,7 +10,7 @@ import (
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/strslice"
|
||||
"github.com/docker/go-connections/nat"
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/docker/go-units"
|
||||
)
|
||||
|
||||
// CgroupnsMode represents the cgroup namespace mode of the container
|
||||
|
2
vendor/github.com/docker/docker/api/types/types.go
generated
vendored
@ -484,4 +484,6 @@ type BuildCachePruneOptions struct {
|
||||
All bool
|
||||
KeepStorage int64
|
||||
Filters filters.Args
|
||||
|
||||
// FIXME(thaJeztah): add new options; see https://github.com/moby/moby/issues/48639
|
||||
}
|
||||
|
10
vendor/github.com/docker/docker/client/client.go
generated
vendored
@ -2,7 +2,7 @@
|
||||
Package client is a Go client for the Docker Engine API.
|
||||
|
||||
For more information about the Engine API, see the documentation:
|
||||
https://docs.docker.com/engine/api/
|
||||
https://docs.docker.com/reference/api/engine/
|
||||
|
||||
# Usage
|
||||
|
||||
@ -247,6 +247,14 @@ func (cli *Client) tlsConfig() *tls.Config {
|
||||
|
||||
func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) {
|
||||
transport := &http.Transport{}
|
||||
// Necessary to prevent long-lived processes using the
|
||||
// client from leaking connections due to idle connections
|
||||
// not being released.
|
||||
// TODO: see if we can also address this from the server side,
|
||||
// or in go-connections.
|
||||
// see: https://github.com/moby/moby/issues/45539
|
||||
transport.MaxIdleConns = 6
|
||||
transport.IdleConnTimeout = 30 * time.Second
|
||||
err := sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
4
vendor/github.com/docker/docker/client/ping.go
generated
vendored
@ -56,8 +56,8 @@ func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) {
|
||||
err := cli.checkResponseErr(resp)
|
||||
return ping, errdefs.FromStatusCode(err, resp.statusCode)
|
||||
}
|
||||
ping.APIVersion = resp.header.Get("API-Version")
|
||||
ping.OSType = resp.header.Get("OSType")
|
||||
ping.APIVersion = resp.header.Get("Api-Version")
|
||||
ping.OSType = resp.header.Get("Ostype")
|
||||
if resp.header.Get("Docker-Experimental") == "true" {
|
||||
ping.Experimental = true
|
||||
}
|
||||
|
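The ping.go and swagger changes above track Go's canonical header spelling; a quick standalone check of how net/http canonicalizes those keys (the "example" value is a placeholder):

package main

import (
	"fmt"
	"net/http"
	"net/textproto"
)

func main() {
	// http.Header.Get canonicalizes its key, so "API-Version" and "Api-Version"
	// resolve to the same entry; the diff just switches to the canonical spelling.
	fmt.Println(textproto.CanonicalMIMEHeaderKey("API-Version")) // Api-Version
	fmt.Println(textproto.CanonicalMIMEHeaderKey("OSType"))      // Ostype

	h := http.Header{}
	h.Set("API-Version", "example")
	fmt.Println(h.Get("Api-Version")) // example
}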
2
vendor/github.com/proglottis/gpgme/.gitignore
generated
vendored
@ -1 +1,3 @@
|
||||
testdata/gpghome/random_seed
|
||||
testdata/gpghome/.gpg-v21-migrated
|
||||
testdata/gpghome/private-keys-v1.d/
|
||||
|
42
vendor/github.com/proglottis/gpgme/callbacks.go
generated
vendored
@ -1,42 +0,0 @@
|
||||
package gpgme
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
var callbacks struct {
|
||||
sync.Mutex
|
||||
m map[uintptr]interface{}
|
||||
c uintptr
|
||||
}
|
||||
|
||||
func callbackAdd(v interface{}) uintptr {
|
||||
callbacks.Lock()
|
||||
defer callbacks.Unlock()
|
||||
if callbacks.m == nil {
|
||||
callbacks.m = make(map[uintptr]interface{})
|
||||
}
|
||||
callbacks.c++
|
||||
ret := callbacks.c
|
||||
callbacks.m[ret] = v
|
||||
return ret
|
||||
}
|
||||
|
||||
func callbackLookup(c uintptr) interface{} {
|
||||
callbacks.Lock()
|
||||
defer callbacks.Unlock()
|
||||
ret := callbacks.m[c]
|
||||
if ret == nil {
|
||||
panic("callback pointer not found")
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func callbackDelete(c uintptr) {
|
||||
callbacks.Lock()
|
||||
defer callbacks.Unlock()
|
||||
if callbacks.m[c] == nil {
|
||||
panic("callback pointer not found")
|
||||
}
|
||||
delete(callbacks.m, c)
|
||||
}
|
123
vendor/github.com/proglottis/gpgme/data.go
generated
vendored
@ -10,6 +10,7 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"runtime"
|
||||
"runtime/cgo"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
@ -19,30 +20,32 @@ const (
|
||||
SeekEnd = C.SEEK_END
|
||||
)
|
||||
|
||||
var dataCallbacks = C.struct_gpgme_data_cbs{
|
||||
read: C.gpgme_data_read_cb_t(C.gogpgme_readfunc),
|
||||
write: C.gpgme_data_write_cb_t(C.gogpgme_writefunc),
|
||||
seek: C.gpgme_data_seek_cb_t(C.gogpgme_seekfunc),
|
||||
}
|
||||
|
||||
//export gogpgme_readfunc
|
||||
func gogpgme_readfunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t {
|
||||
d := callbackLookup(uintptr(handle)).(*Data)
|
||||
if len(d.buf) < int(size) {
|
||||
d.buf = make([]byte, size)
|
||||
}
|
||||
n, err := d.r.Read(d.buf[:size])
|
||||
h := *(*cgo.Handle)(handle)
|
||||
d := h.Value().(*Data)
|
||||
n, err := d.r.Read(unsafe.Slice((*byte)(buffer), size))
|
||||
if err != nil && err != io.EOF {
|
||||
d.err = err
|
||||
C.gpgme_err_set_errno(C.EIO)
|
||||
return -1
|
||||
}
|
||||
C.memcpy(buffer, unsafe.Pointer(&d.buf[0]), C.size_t(n))
|
||||
return C.ssize_t(n)
|
||||
}
|
||||
|
||||
//export gogpgme_writefunc
|
||||
func gogpgme_writefunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t {
|
||||
d := callbackLookup(uintptr(handle)).(*Data)
|
||||
if len(d.buf) < int(size) {
|
||||
d.buf = make([]byte, size)
|
||||
}
|
||||
C.memcpy(unsafe.Pointer(&d.buf[0]), buffer, C.size_t(size))
|
||||
n, err := d.w.Write(d.buf[:size])
|
||||
h := *(*cgo.Handle)(handle)
|
||||
d := h.Value().(*Data)
|
||||
n, err := d.w.Write(unsafe.Slice((*byte)(buffer), size))
|
||||
if err != nil && err != io.EOF {
|
||||
d.err = err
|
||||
C.gpgme_err_set_errno(C.EIO)
|
||||
return -1
|
||||
}
|
||||
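The callbacks above now read and write through unsafe.Slice instead of copying via an intermediate buffer; this is a minimal standalone illustration of that aliasing, not gpgme code.

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// unsafe.Slice builds a []byte view over existing memory (here a Go array,
	// in the callbacks a C-provided buffer), so reads and writes need no memcpy.
	backing := [4]byte{'d', 'a', 't', 'a'}
	view := unsafe.Slice(&backing[0], len(backing))
	view[3] = '!'
	fmt.Println(string(backing[:])) // "dat!", because the slice aliases the array
}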
@ -51,9 +54,11 @@ func gogpgme_writefunc(handle, buffer unsafe.Pointer, size C.size_t) C.ssize_t {
|
||||
|
||||
//export gogpgme_seekfunc
|
||||
func gogpgme_seekfunc(handle unsafe.Pointer, offset C.gpgme_off_t, whence C.int) C.gpgme_off_t {
|
||||
d := callbackLookup(uintptr(handle)).(*Data)
|
||||
h := *(*cgo.Handle)(handle)
|
||||
d := h.Value().(*Data)
|
||||
n, err := d.s.Seek(int64(offset), int(whence))
|
||||
if err != nil {
|
||||
d.err = err
|
||||
C.gpgme_err_set_errno(C.EIO)
|
||||
return -1
|
||||
}
|
||||
@ -63,12 +68,11 @@ func gogpgme_seekfunc(handle unsafe.Pointer, offset C.gpgme_off_t, whence C.int)
|
||||
// The Data buffer used to communicate with GPGME
|
||||
type Data struct {
|
||||
dh C.gpgme_data_t // WARNING: Call runtime.KeepAlive(d) after ANY passing of d.dh to C
|
||||
buf []byte
|
||||
cbs C.struct_gpgme_data_cbs
|
||||
r io.Reader
|
||||
w io.Writer
|
||||
s io.Seeker
|
||||
cbc uintptr // WARNING: Call runtime.KeepAlive(d) after ANY use of d.cbc in C (typically via d.dh)
|
||||
cbc cgo.Handle // WARNING: Call runtime.KeepAlive(d) after ANY use of d.cbc in C (typically via d.dh)
|
||||
err error
|
||||
}
|
||||
|
||||
func newData() *Data {
|
||||
@ -86,6 +90,7 @@ func NewData() (*Data, error) {
|
||||
// NewDataFile returns a new file based data buffer
|
||||
func NewDataFile(f *os.File) (*Data, error) {
|
||||
d := newData()
|
||||
d.r = f
|
||||
return d, handleError(C.gpgme_data_new_from_fd(&d.dh, C.int(f.Fd())))
|
||||
}
|
||||
|
||||
@ -103,20 +108,22 @@ func NewDataBytes(b []byte) (*Data, error) {
|
||||
func NewDataReader(r io.Reader) (*Data, error) {
|
||||
d := newData()
|
||||
d.r = r
|
||||
d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc)
|
||||
cbc := callbackAdd(d)
|
||||
d.cbc = cbc
|
||||
return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc)))
|
||||
if s, ok := r.(io.Seeker); ok {
|
||||
d.s = s
|
||||
}
|
||||
d.cbc = cgo.NewHandle(d)
|
||||
return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc)))
|
||||
}
|
||||
|
||||
// NewDataWriter returns a new callback based data buffer
|
||||
func NewDataWriter(w io.Writer) (*Data, error) {
|
||||
d := newData()
|
||||
d.w = w
|
||||
d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc)
|
||||
cbc := callbackAdd(d)
|
||||
d.cbc = cbc
|
||||
return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc)))
|
||||
if s, ok := w.(io.Seeker); ok {
|
||||
d.s = s
|
||||
}
|
||||
d.cbc = cgo.NewHandle(d)
|
||||
return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc)))
|
||||
}
|
||||
|
||||
// NewDataReadWriter returns a new callback based data buffer
|
||||
@ -124,11 +131,11 @@ func NewDataReadWriter(rw io.ReadWriter) (*Data, error) {
|
||||
d := newData()
|
||||
d.r = rw
|
||||
d.w = rw
|
||||
d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc)
|
||||
d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc)
|
||||
cbc := callbackAdd(d)
|
||||
d.cbc = cbc
|
||||
return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc)))
|
||||
if s, ok := rw.(io.Seeker); ok {
|
||||
d.s = s
|
||||
}
|
||||
d.cbc = cgo.NewHandle(d)
|
||||
return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc)))
|
||||
}
|
||||
|
||||
// NewDataReadWriteSeeker returns a new callback based data buffer
|
||||
@ -137,12 +144,8 @@ func NewDataReadWriteSeeker(rw io.ReadWriteSeeker) (*Data, error) {
|
||||
d.r = rw
|
||||
d.w = rw
|
||||
d.s = rw
|
||||
d.cbs.read = C.gpgme_data_read_cb_t(C.gogpgme_readfunc)
|
||||
d.cbs.write = C.gpgme_data_write_cb_t(C.gogpgme_writefunc)
|
||||
d.cbs.seek = C.gpgme_data_seek_cb_t(C.gogpgme_seekfunc)
|
||||
cbc := callbackAdd(d)
|
||||
d.cbc = cbc
|
||||
return d, handleError(C.gogpgme_data_new_from_cbs(&d.dh, &d.cbs, C.uintptr_t(cbc)))
|
||||
d.cbc = cgo.NewHandle(d)
|
||||
return d, handleError(C.gpgme_data_new_from_cbs(&d.dh, &dataCallbacks, unsafe.Pointer(&d.cbc)))
|
||||
}
|
||||
|
||||
// Close releases any resources associated with the data buffer
|
||||
@ -151,7 +154,7 @@ func (d *Data) Close() error {
|
||||
return nil
|
||||
}
|
||||
if d.cbc > 0 {
|
||||
callbackDelete(d.cbc)
|
||||
d.cbc.Delete()
|
||||
}
|
||||
_, err := C.gpgme_data_release(d.dh)
|
||||
runtime.KeepAlive(d)
|
||||
@ -160,24 +163,42 @@ func (d *Data) Close() error {
|
||||
}
|
||||
|
||||
func (d *Data) Write(p []byte) (int, error) {
|
||||
n, err := C.gpgme_data_write(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p)))
|
||||
runtime.KeepAlive(d)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
var buffer *byte
|
||||
if len(p) > 0 {
|
||||
buffer = &p[0]
|
||||
}
|
||||
if n == 0 {
|
||||
|
||||
n, err := C.gpgme_data_write(d.dh, unsafe.Pointer(buffer), C.size_t(len(p)))
|
||||
runtime.KeepAlive(d)
|
||||
switch {
|
||||
case d.err != nil:
|
||||
defer func() { d.err = nil }()
|
||||
|
||||
return 0, d.err
|
||||
case err != nil:
|
||||
return 0, err
|
||||
case len(p) > 0 && n == 0:
|
||||
return 0, io.EOF
|
||||
}
|
||||
return int(n), nil
|
||||
}
|
||||
|
||||
func (d *Data) Read(p []byte) (int, error) {
|
||||
n, err := C.gpgme_data_read(d.dh, unsafe.Pointer(&p[0]), C.size_t(len(p)))
|
||||
runtime.KeepAlive(d)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
var buffer *byte
|
||||
if len(p) > 0 {
|
||||
buffer = &p[0]
|
||||
}
|
||||
if n == 0 {
|
||||
|
||||
n, err := C.gpgme_data_read(d.dh, unsafe.Pointer(buffer), C.size_t(len(p)))
|
||||
runtime.KeepAlive(d)
|
||||
switch {
|
||||
case d.err != nil:
|
||||
defer func() { d.err = nil }()
|
||||
|
||||
return 0, d.err
|
||||
case err != nil:
|
||||
return 0, err
|
||||
case len(p) > 0 && n == 0:
|
||||
return 0, io.EOF
|
||||
}
|
||||
return int(n), nil
|
||||
@ -186,7 +207,15 @@ func (d *Data) Read(p []byte) (int, error) {
|
||||
func (d *Data) Seek(offset int64, whence int) (int64, error) {
|
||||
n, err := C.gogpgme_data_seek(d.dh, C.gpgme_off_t(offset), C.int(whence))
|
||||
runtime.KeepAlive(d)
|
||||
return int64(n), err
|
||||
switch {
|
||||
case d.err != nil:
|
||||
defer func() { d.err = nil }()
|
||||
|
||||
return 0, d.err
|
||||
case err != nil:
|
||||
return 0, err
|
||||
}
|
||||
return int64(n), nil
|
||||
}
|
||||
|
||||
// Name returns the associated filename if any
|
||||
|
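The data.go changes above replace the hand-rolled callback registry with runtime/cgo handles; below is a simplified sketch of that pattern. The payload type is a stand-in for the *Data/*Context values, and the real code stores the handle in d.cbc and passes &d.cbc to C, which this sketch does not attempt to reproduce.

package main

import (
	"fmt"
	"runtime/cgo"
)

// payload stands in for the values the callbacks need to recover.
type payload struct{ name string }

func main() {
	// Register the Go value and obtain an opaque handle that may safely cross
	// the C boundary (unlike a Go pointer).
	h := cgo.NewHandle(&payload{name: "example"})
	defer h.Delete() // analogous to d.cbc.Delete() in Close()

	// A callback receiving the handle recovers the original value.
	recovered := h.Value().(*payload)
	fmt.Println(recovered.name)
}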
20
vendor/github.com/proglottis/gpgme/go_gpgme.c
generated
vendored
@ -1,13 +1,5 @@
|
||||
#include "go_gpgme.h"
|
||||
|
||||
gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle) {
|
||||
return gpgme_data_new_from_cbs(dh, cbs, (void *)handle);
|
||||
}
|
||||
|
||||
void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle) {
|
||||
gpgme_set_passphrase_cb(ctx, cb, (void *)handle);
|
||||
}
|
||||
|
||||
gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence) {
|
||||
return gpgme_data_seek(dh, offset, whence);
|
||||
}
|
||||
@ -15,17 +7,17 @@ gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence) {
|
||||
gpgme_error_t gogpgme_op_assuan_transact_ext(
|
||||
gpgme_ctx_t ctx,
|
||||
char* cmd,
|
||||
uintptr_t data_h,
|
||||
uintptr_t inquiry_h,
|
||||
uintptr_t status_h,
|
||||
void* data_h,
|
||||
void* inquiry_h,
|
||||
void* status_h,
|
||||
gpgme_error_t *operr
|
||||
){
|
||||
return gpgme_op_assuan_transact_ext(
|
||||
ctx,
|
||||
cmd,
|
||||
(gpgme_assuan_data_cb_t) gogpgme_assuan_data_callback, (void *)data_h,
|
||||
(gpgme_assuan_inquire_cb_t) gogpgme_assuan_inquiry_callback, (void *)inquiry_h,
|
||||
(gpgme_assuan_status_cb_t) gogpgme_assuan_status_callback, (void *)status_h,
|
||||
(gpgme_assuan_data_cb_t) gogpgme_assuan_data_callback, data_h,
|
||||
(gpgme_assuan_inquire_cb_t) gogpgme_assuan_inquiry_callback, inquiry_h,
|
||||
(gpgme_assuan_status_cb_t) gogpgme_assuan_status_callback, status_h,
|
||||
operr
|
||||
);
|
||||
}
|
||||
|
4
vendor/github.com/proglottis/gpgme/go_gpgme.h
generated
vendored
@ -10,11 +10,9 @@ extern ssize_t gogpgme_readfunc(void *handle, void *buffer, size_t size);
|
||||
extern ssize_t gogpgme_writefunc(void *handle, void *buffer, size_t size);
|
||||
extern off_t gogpgme_seekfunc(void *handle, off_t offset, int whence);
|
||||
extern gpgme_error_t gogpgme_passfunc(void *hook, char *uid_hint, char *passphrase_info, int prev_was_bad, int fd);
|
||||
extern gpgme_error_t gogpgme_data_new_from_cbs(gpgme_data_t *dh, gpgme_data_cbs_t cbs, uintptr_t handle);
|
||||
extern void gogpgme_set_passphrase_cb(gpgme_ctx_t ctx, gpgme_passphrase_cb_t cb, uintptr_t handle);
|
||||
extern gpgme_off_t gogpgme_data_seek(gpgme_data_t dh, gpgme_off_t offset, int whence);
|
||||
|
||||
extern gpgme_error_t gogpgme_op_assuan_transact_ext(gpgme_ctx_t ctx, char *cmd, uintptr_t data_h, uintptr_t inquiry_h , uintptr_t status_h, gpgme_error_t *operr);
|
||||
extern gpgme_error_t gogpgme_op_assuan_transact_ext(gpgme_ctx_t ctx, char *cmd, void *data_h, void *inquiry_h , void *status_h, gpgme_error_t *operr);
|
||||
|
||||
extern gpgme_error_t gogpgme_assuan_data_callback(void *opaque, void* data, size_t datalen );
|
||||
extern gpgme_error_t gogpgme_assuan_inquiry_callback(void *opaque, char* name, char* args);
|
||||
|
78
vendor/github.com/proglottis/gpgme/gpgme.go
generated
vendored
@ -7,11 +7,13 @@ package gpgme
// #include <gpgme.h>
// #include "go_gpgme.h"
import "C"

import (
"fmt"
"io"
"os"
"runtime"
"runtime/cgo"
"time"
"unsafe"
)
@ -27,7 +29,8 @@ type Callback func(uidHint string, prevWasBad bool, f *os.File) error

//export gogpgme_passfunc
func gogpgme_passfunc(hook unsafe.Pointer, uid_hint, passphrase_info *C.char, prev_was_bad, fd C.int) C.gpgme_error_t {
c := callbackLookup(uintptr(hook)).(*Context)
h := *(*cgo.Handle)(hook)
c := h.Value().(*Context)
go_uid_hint := C.GoString(uid_hint)
f := os.NewFile(uintptr(fd), go_uid_hint)
defer f.Close()
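The hunk above swaps the package's hand-rolled callback registry (callbackAdd/callbackLookup) for runtime/cgo handles: C now receives the address of a cgo.Handle and the exported callback turns it back into the Go value. A minimal, GPGME-independent sketch of that round trip; the passphraseCtx type is illustrative only:

```go
package main

import (
	"fmt"
	"runtime/cgo"
	"unsafe"
)

// passphraseCtx stands in for the *Context value the real code wraps.
type passphraseCtx struct{ uid string }

func main() {
	// Wrap a Go value in a handle; only the handle, not a Go pointer,
	// is allowed to cross into C.
	h := cgo.NewHandle(&passphraseCtx{uid: "example"})
	defer h.Delete() // every handle must be deleted exactly once

	// What the updated code hands to C (through a void*) is the handle's address...
	p := unsafe.Pointer(&h)

	// ...and this is what gogpgme_passfunc does to recover the value.
	got := *(*cgo.Handle)(p)
	ctx := got.Value().(*passphraseCtx)
	fmt.Println(ctx.uid)
}
```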
@ -233,6 +236,17 @@ func SetEngineInfo(proto Protocol, fileName, homeDir string) error {
return handleError(C.gpgme_set_engine_info(C.gpgme_protocol_t(proto), cfn, chome))
}

func GetDirInfo(what string) string {
cwhat := C.CString(what)
defer C.free(unsafe.Pointer(cwhat))

cdir := C.gpgme_get_dirinfo(cwhat)
if cdir == nil {
return ""
}
return C.GoString(cdir)
}

func FindKeys(pattern string, secretOnly bool) ([]*Key, error) {
var keys []*Key
ctx, err := New()
@ -243,7 +257,7 @@ func FindKeys(pattern string, secretOnly bool) ([]*Key, error) {
if err := ctx.KeyListStart(pattern, secretOnly); err != nil {
return keys, err
}
defer ctx.KeyListEnd()
defer func() { _ = ctx.KeyListEnd() }()
for ctx.KeyListNext() {
keys = append(keys, ctx.Key)
}
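GetDirInfo is new exported API, and FindKeys now discards the KeyListEnd error explicitly. A hedged usage sketch; the "homedir" field name is a common gpgme_get_dirinfo key and the empty search pattern simply lists everything, but neither is dictated by this diff:

```go
package main

import (
	"fmt"

	"github.com/proglottis/gpgme"
)

func main() {
	// New helper from the hunk above; returns "" when gpgme has no answer.
	fmt.Println("GnuPG home:", gpgme.GetDirInfo("homedir"))

	// FindKeys as shown above; false asks for public keys rather than secret ones.
	keys, err := gpgme.FindKeys("", false)
	if err != nil {
		panic(err)
	}
	fmt.Println("keys found:", len(keys))
}
```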
@ -268,8 +282,10 @@ func Decrypt(r io.Reader) (*Data, error) {
if err != nil {
return nil, err
}
err = ctx.Decrypt(cipher, plain)
plain.Seek(0, SeekSet)
if err := ctx.Decrypt(cipher, plain); err != nil {
return nil, err
}
_, err = plain.Seek(0, SeekSet)
return plain, err
}

@ -278,7 +294,7 @@ type Context struct {
KeyError error

callback Callback
cbc uintptr // WARNING: Call runtime.KeepAlive(c) after ANY use of c.cbc in C (typically via c.ctx)
cbc cgo.Handle // WARNING: Call runtime.KeepAlive(c) after ANY use of c.cbc in C (typically via c.ctx)

ctx C.gpgme_ctx_t // WARNING: Call runtime.KeepAlive(c) after ANY passing of c.ctx to C
}
@ -295,7 +311,7 @@ func (c *Context) Release() {
return
}
if c.cbc > 0 {
callbackDelete(c.cbc)
c.cbc.Delete()
}
C.gpgme_release(c.ctx)
runtime.KeepAlive(c)
@ -364,15 +380,14 @@ func (c *Context) SetCallback(callback Callback) error {
var err error
c.callback = callback
if c.cbc > 0 {
callbackDelete(c.cbc)
c.cbc.Delete()
}
if callback != nil {
cbc := callbackAdd(c)
c.cbc = cbc
_, err = C.gogpgme_set_passphrase_cb(c.ctx, C.gpgme_passphrase_cb_t(C.gogpgme_passfunc), C.uintptr_t(cbc))
c.cbc = cgo.NewHandle(c)
_, err = C.gpgme_set_passphrase_cb(c.ctx, C.gpgme_passphrase_cb_t(C.gogpgme_passfunc), unsafe.Pointer(&c.cbc))
} else {
c.cbc = 0
_, err = C.gogpgme_set_passphrase_cb(c.ctx, nil, 0)
_, err = C.gpgme_set_passphrase_cb(c.ctx, nil, nil)
}
runtime.KeepAlive(c)
return err
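SetCallback now stores a cgo.Handle in c.cbc and passes the handle's address straight to gpgme_set_passphrase_cb, dropping the gogpgme_set_passphrase_cb C shim. A hedged sketch of wiring a passphrase callback from the caller's side; the Callback signature comes from the hunk context above, while the loopback-style passphrase handling is purely illustrative:

```go
package main

import (
	"fmt"
	"os"

	"github.com/proglottis/gpgme"
)

func main() {
	ctx, err := gpgme.New()
	if err != nil {
		panic(err)
	}
	defer ctx.Release() // Release also deletes the callback handle, per the hunk above

	err = ctx.SetCallback(func(uidHint string, prevWasBad bool, f *os.File) error {
		if prevWasBad {
			return fmt.Errorf("passphrase rejected for %q", uidHint)
		}
		// The passphrase is written to the file descriptor gpgme hands us.
		_, werr := fmt.Fprintln(f, os.Getenv("GPG_PASSPHRASE"))
		return werr
	})
	if err != nil {
		panic(err)
	}
}
```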
@ -564,9 +579,11 @@ func (c *Context) Sign(signers []*Key, plain, sig *Data, mode SigMode) error {
return err
}

type AssuanDataCallback func(data []byte) error
type AssuanInquireCallback func(name, args string) error
type AssuanStatusCallback func(status, args string) error
type (
AssuanDataCallback func(data []byte) error
AssuanInquireCallback func(name, args string) error
AssuanStatusCallback func(status, args string) error
)

// AssuanSend sends a raw Assuan command to gpg-agent
func (c *Context) AssuanSend(
@ -577,17 +594,17 @@ func (c *Context) AssuanSend(
) error {
var operr C.gpgme_error_t

dataPtr := callbackAdd(&data)
inquiryPtr := callbackAdd(&inquiry)
statusPtr := callbackAdd(&status)
dataPtr := cgo.NewHandle(&data)
inquiryPtr := cgo.NewHandle(&inquiry)
statusPtr := cgo.NewHandle(&status)
cmdCStr := C.CString(cmd)
defer C.free(unsafe.Pointer(cmdCStr))
err := C.gogpgme_op_assuan_transact_ext(
c.ctx,
cmdCStr,
C.uintptr_t(dataPtr),
C.uintptr_t(inquiryPtr),
C.uintptr_t(statusPtr),
unsafe.Pointer(&dataPtr),
unsafe.Pointer(&inquiryPtr),
unsafe.Pointer(&statusPtr),
&operr,
)
runtime.KeepAlive(c)
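AssuanSend now allocates one cgo.Handle per callback and feeds their addresses through the void* parameters of gogpgme_op_assuan_transact_ext. A hedged usage sketch; the parameter order (cmd, data, inquiry, status) is inferred from the handle allocation above, and "GETINFO version" is a standard gpg-agent Assuan command rather than anything this diff defines:

```go
package main

import (
	"fmt"

	"github.com/proglottis/gpgme"
)

func main() {
	ctx, err := gpgme.New()
	if err != nil {
		panic(err)
	}
	defer ctx.Release()

	err = ctx.AssuanSend("GETINFO version",
		func(data []byte) error { // AssuanDataCallback
			fmt.Printf("agent says: %s", data)
			return nil
		},
		nil, // AssuanInquireCallback: not needed for this command
		func(status, args string) error { // AssuanStatusCallback
			fmt.Println("status:", status, args)
			return nil
		},
	)
	if err != nil {
		panic(err)
	}
}
```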
@ -600,11 +617,14 @@ func (c *Context) AssuanSend(

//export gogpgme_assuan_data_callback
func gogpgme_assuan_data_callback(handle unsafe.Pointer, data unsafe.Pointer, datalen C.size_t) C.gpgme_error_t {
c := callbackLookup(uintptr(handle)).(*AssuanDataCallback)
h := *(*cgo.Handle)(handle)
c := h.Value().(*AssuanDataCallback)
if *c == nil {
return 0
}
(*c)(C.GoBytes(data, C.int(datalen)))
if err := (*c)(C.GoBytes(data, C.int(datalen))); err != nil {
return C.gpgme_error(C.GPG_ERR_USER_1)
}
return 0
}

@ -612,11 +632,14 @@ func gogpgme_assuan_data_callback(handle unsafe.Pointer, data unsafe.Pointer, da
func gogpgme_assuan_inquiry_callback(handle unsafe.Pointer, cName *C.char, cArgs *C.char) C.gpgme_error_t {
name := C.GoString(cName)
args := C.GoString(cArgs)
c := callbackLookup(uintptr(handle)).(*AssuanInquireCallback)
h := *(*cgo.Handle)(handle)
c := h.Value().(*AssuanInquireCallback)
if *c == nil {
return 0
}
(*c)(name, args)
if err := (*c)(name, args); err != nil {
return C.gpgme_error(C.GPG_ERR_USER_1)
}
return 0
}

@ -624,11 +647,14 @@ func gogpgme_assuan_inquiry_callback(handle unsafe.Pointer, cName *C.char, cArgs
func gogpgme_assuan_status_callback(handle unsafe.Pointer, cStatus *C.char, cArgs *C.char) C.gpgme_error_t {
status := C.GoString(cStatus)
args := C.GoString(cArgs)
c := callbackLookup(uintptr(handle)).(*AssuanStatusCallback)
h := *(*cgo.Handle)(handle)
c := h.Value().(*AssuanStatusCallback)
if *c == nil {
return 0
}
(*c)(status, args)
if err := (*c)(status, args); err != nil {
return C.gpgme_error(C.GPG_ERR_USER_1)
}
return 0
}
1
vendor/github.com/proglottis/gpgme/unset_agent_info.go
generated
vendored
@ -1,3 +1,4 @@
//go:build !windows
// +build !windows

package gpgme
6
vendor/github.com/sigstore/rekor/CONTRIBUTORS.md
generated
vendored
@ -109,6 +109,12 @@ to github):
git push origin your-branch --force

Alternatively, a core member can squash your commits within Github.

## DCO Signoff

Make sure to sign the [Developer Certificate of
Origin](https://git-scm.com/docs/git-commit#Documentation/git-commit.txt---signoff).

## Code of Conduct

Rekor adheres to and enforces the [Contributor Covenant](http://contributor-covenant.org/version/1/4/) Code of Conduct.
51
vendor/github.com/sigstore/rekor/pkg/client/options.go
generated
vendored
@ -16,6 +16,7 @@ package client

import (
"net/http"
"time"

"github.com/hashicorp/go-retryablehttp"
)
@ -24,10 +25,14 @@ import (
type Option func(*options)

type options struct {
UserAgent string
RetryCount uint
InsecureTLS bool
Logger interface{}
UserAgent string
RetryCount uint
RetryWaitMin time.Duration
RetryWaitMax time.Duration
InsecureTLS bool
Logger interface{}
NoDisableKeepalives bool
Headers map[string][]string
}

const (
@ -62,6 +67,20 @@ func WithRetryCount(retryCount uint) Option {
}
}

// WithRetryWaitMin sets the minimum length of time to wait between retries.
func WithRetryWaitMin(t time.Duration) Option {
return func(o *options) {
o.RetryWaitMin = t
}
}

// WithRetryWaitMax sets the maximum length of time to wait between retries.
func WithRetryWaitMax(t time.Duration) Option {
return func(o *options) {
o.RetryWaitMax = t
}
}

// WithLogger sets the logger; it must implement either retryablehttp.Logger or retryablehttp.LeveledLogger; if not, this will not take effect.
|
||||
func WithLogger(logger interface{}) Option {
|
||||
return func(o *options) {
|
||||
@ -72,20 +91,41 @@ func WithLogger(logger interface{}) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithInsecureTLS disables TLS verification.
|
||||
func WithInsecureTLS(enabled bool) Option {
|
||||
return func(o *options) {
|
||||
o.InsecureTLS = enabled
|
||||
}
|
||||
}
|
||||
|
||||
// WithNoDisableKeepalives unsets the default DisableKeepalives setting.
|
||||
func WithNoDisableKeepalives(noDisableKeepalives bool) Option {
|
||||
return func(o *options) {
|
||||
o.NoDisableKeepalives = noDisableKeepalives
|
||||
}
|
||||
}
|
||||
|
||||
// WithHeaders sets default headers for every client request.
|
||||
func WithHeaders(h map[string][]string) Option {
|
||||
return func(o *options) {
|
||||
o.Headers = h
|
||||
}
|
||||
}
|
||||
|
||||
type roundTripper struct {
|
||||
http.RoundTripper
|
||||
UserAgent string
|
||||
Headers map[string][]string
|
||||
}
|
||||
|
||||
// RoundTrip implements `http.RoundTripper`
|
||||
func (rt *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
req.Header.Set("User-Agent", rt.UserAgent)
|
||||
for k, v := range rt.Headers {
|
||||
for _, h := range v {
|
||||
req.Header.Add(k, h)
|
||||
}
|
||||
}
|
||||
return rt.RoundTripper.RoundTrip(req)
|
||||
}
|
||||
|
||||
@ -93,12 +133,13 @@ func createRoundTripper(inner http.RoundTripper, o *options) http.RoundTripper {
|
||||
if inner == nil {
|
||||
inner = http.DefaultTransport
|
||||
}
|
||||
if o.UserAgent == "" {
|
||||
if o.UserAgent == "" && o.Headers == nil {
|
||||
// There's nothing to do...
|
||||
return inner
|
||||
}
|
||||
return &roundTripper{
|
||||
RoundTripper: inner,
|
||||
UserAgent: o.UserAgent,
|
||||
Headers: o.Headers,
|
||||
}
|
||||
}
|
||||
|
6
vendor/github.com/sigstore/rekor/pkg/client/rekor_client.go
generated
vendored
@ -22,6 +22,7 @@ import (
"github.com/go-openapi/runtime"
httptransport "github.com/go-openapi/runtime/client"
"github.com/go-openapi/strfmt"

"github.com/hashicorp/go-cleanhttp"
retryablehttp "github.com/hashicorp/go-retryablehttp"
"github.com/sigstore/rekor/pkg/generated/client"
@ -37,6 +38,9 @@ func GetRekorClient(rekorServerURL string, opts ...Option) (*client.Rekor, error

retryableClient := retryablehttp.NewClient()
defaultTransport := cleanhttp.DefaultTransport()
if o.NoDisableKeepalives {
defaultTransport.DisableKeepAlives = false
}
if o.InsecureTLS {
/* #nosec G402 */
defaultTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
@ -45,6 +49,8 @@ func GetRekorClient(rekorServerURL string, opts ...Option) (*client.Rekor, error
Transport: defaultTransport,
}
retryableClient.RetryMax = int(o.RetryCount)
retryableClient.RetryWaitMin = o.RetryWaitMin
retryableClient.RetryWaitMax = o.RetryWaitMax
retryableClient.Logger = o.Logger

httpClient := retryableClient.StandardClient()
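GetRekorClient now threads the new retry-wait, keep-alive and header options into the underlying retryablehttp client. A hedged usage sketch; the URL is the public Rekor instance and every value below is illustrative rather than recommended:

```go
package main

import (
	"fmt"
	"time"

	"github.com/sigstore/rekor/pkg/client"
)

func main() {
	rc, err := client.GetRekorClient("https://rekor.sigstore.dev",
		client.WithRetryCount(3),
		client.WithRetryWaitMin(500*time.Millisecond),
		client.WithRetryWaitMax(5*time.Second),
		client.WithNoDisableKeepalives(true),
		client.WithHeaders(map[string][]string{"X-Request-Source": {"skopeo-docs"}}),
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("rekor client ready: %T\n", rc.Entries)
}
```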
@ -22,6 +22,7 @@ package entries
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
@ -126,11 +127,13 @@ func (o *CreateLogEntryCreated) Code() int {
|
||||
}
|
||||
|
||||
func (o *CreateLogEntryCreated) Error() string {
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryCreated %+v", 201, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryCreated %s", 201, payload)
|
||||
}
|
||||
|
||||
func (o *CreateLogEntryCreated) String() string {
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryCreated %+v", 201, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryCreated %s", 201, payload)
|
||||
}
|
||||
|
||||
func (o *CreateLogEntryCreated) GetPayload() models.LogEntry {
|
||||
@ -210,11 +213,13 @@ func (o *CreateLogEntryBadRequest) Code() int {
|
||||
}
|
||||
|
||||
func (o *CreateLogEntryBadRequest) Error() string {
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryBadRequest %+v", 400, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryBadRequest %s", 400, payload)
|
||||
}
|
||||
|
||||
func (o *CreateLogEntryBadRequest) String() string {
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryBadRequest %+v", 400, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryBadRequest %s", 400, payload)
|
||||
}
|
||||
|
||||
func (o *CreateLogEntryBadRequest) GetPayload() *models.Error {
|
||||
@ -280,11 +285,13 @@ func (o *CreateLogEntryConflict) Code() int {
|
||||
}
|
||||
|
||||
func (o *CreateLogEntryConflict) Error() string {
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryConflict %+v", 409, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryConflict %s", 409, payload)
|
||||
}
|
||||
|
||||
func (o *CreateLogEntryConflict) String() string {
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryConflict %+v", 409, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntryConflict %s", 409, payload)
|
||||
}
|
||||
|
||||
func (o *CreateLogEntryConflict) GetPayload() *models.Error {
|
||||
@ -363,11 +370,13 @@ func (o *CreateLogEntryDefault) Code() int {
|
||||
}
|
||||
|
||||
func (o *CreateLogEntryDefault) Error() string {
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntry default %+v", o._statusCode, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntry default %s", o._statusCode, payload)
|
||||
}
|
||||
|
||||
func (o *CreateLogEntryDefault) String() string {
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntry default %+v", o._statusCode, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries][%d] createLogEntry default %s", o._statusCode, payload)
|
||||
}
|
||||
|
||||
func (o *CreateLogEntryDefault) GetPayload() *models.Error {
|
||||
|
28
vendor/github.com/sigstore/rekor/pkg/generated/client/entries/entries_client.go
generated
vendored
@ -23,6 +23,7 @@ package entries
|
||||
|
||||
import (
|
||||
"github.com/go-openapi/runtime"
|
||||
httptransport "github.com/go-openapi/runtime/client"
|
||||
"github.com/go-openapi/strfmt"
|
||||
)
|
||||
|
||||
@ -31,6 +32,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
|
||||
return &Client{transport: transport, formats: formats}
|
||||
}
|
||||
|
||||
// New creates a new entries API client with basic auth credentials.
|
||||
// It takes the following parameters:
|
||||
// - host: http host (github.com).
|
||||
// - basePath: any base path for the API client ("/v1", "/v3").
|
||||
// - scheme: http scheme ("http", "https").
|
||||
// - user: user for basic authentication header.
|
||||
// - password: password for basic authentication header.
|
||||
func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
|
||||
transport := httptransport.New(host, basePath, []string{scheme})
|
||||
transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
|
||||
return &Client{transport: transport, formats: strfmt.Default}
|
||||
}
|
||||
|
||||
// New creates a new entries API client with a bearer token for authentication.
|
||||
// It takes the following parameters:
|
||||
// - host: http host (github.com).
|
||||
// - basePath: any base path for the API client ("/v1", "/v3").
|
||||
// - scheme: http scheme ("http", "https").
|
||||
// - bearerToken: bearer token for Bearer authentication header.
|
||||
func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
|
||||
transport := httptransport.New(host, basePath, []string{scheme})
|
||||
transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
|
||||
return &Client{transport: transport, formats: strfmt.Default}
|
||||
}
|
||||
|
||||
/*
|
||||
Client for entries API
|
||||
*/
|
||||
@ -39,7 +65,7 @@ type Client struct {
|
||||
formats strfmt.Registry
|
||||
}
|
||||
|
||||
// ClientOption is the option for Client methods
|
||||
// ClientOption may be used to customize the behavior of Client methods.
|
||||
type ClientOption func(*runtime.ClientOperation)
|
||||
|
||||
// ClientService is the interface for Client methods
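NewClientWithBasicAuth and NewClientWithBearerToken are convenience constructors added to every generated sub-client in this update. A hedged sketch using the bearer-token variant of the entries client; host, base path, scheme and token are placeholders (the public Rekor API does not normally require authentication):

```go
package main

import (
	"fmt"

	"github.com/sigstore/rekor/pkg/generated/client/entries"
)

func main() {
	svc := entries.NewClientWithBearerToken("rekor.example.org", "/", "https", "example-token")
	fmt.Printf("entries client: %T\n", svc)
}
```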
|
||||
|
@ -22,6 +22,7 @@ package entries
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
@ -108,11 +109,13 @@ func (o *GetLogEntryByIndexOK) Code() int {
|
||||
}
|
||||
|
||||
func (o *GetLogEntryByIndexOK) Error() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexOK %+v", 200, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexOK %s", 200, payload)
|
||||
}
|
||||
|
||||
func (o *GetLogEntryByIndexOK) String() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexOK %+v", 200, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexOK %s", 200, payload)
|
||||
}
|
||||
|
||||
func (o *GetLogEntryByIndexOK) GetPayload() models.LogEntry {
|
||||
@ -173,11 +176,11 @@ func (o *GetLogEntryByIndexNotFound) Code() int {
|
||||
}
|
||||
|
||||
func (o *GetLogEntryByIndexNotFound) Error() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexNotFound ", 404)
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexNotFound", 404)
|
||||
}
|
||||
|
||||
func (o *GetLogEntryByIndexNotFound) String() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexNotFound ", 404)
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndexNotFound", 404)
|
||||
}
|
||||
|
||||
func (o *GetLogEntryByIndexNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
|
||||
@ -234,11 +237,13 @@ func (o *GetLogEntryByIndexDefault) Code() int {
|
||||
}
|
||||
|
||||
func (o *GetLogEntryByIndexDefault) Error() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndex default %+v", o._statusCode, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndex default %s", o._statusCode, payload)
|
||||
}
|
||||
|
||||
func (o *GetLogEntryByIndexDefault) String() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndex default %+v", o._statusCode, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries][%d] getLogEntryByIndex default %s", o._statusCode, payload)
|
||||
}
|
||||
|
||||
func (o *GetLogEntryByIndexDefault) GetPayload() *models.Error {
|
||||
|
@ -22,6 +22,7 @@ package entries
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
@ -108,11 +109,13 @@ func (o *GetLogEntryByUUIDOK) Code() int {
|
||||
}
|
||||
|
||||
func (o *GetLogEntryByUUIDOK) Error() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidOK %+v", 200, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidOK %s", 200, payload)
|
||||
}
|
||||
|
||||
func (o *GetLogEntryByUUIDOK) String() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidOK %+v", 200, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidOK %s", 200, payload)
|
||||
}
|
||||
|
||||
func (o *GetLogEntryByUUIDOK) GetPayload() models.LogEntry {
|
||||
@ -173,11 +176,11 @@ func (o *GetLogEntryByUUIDNotFound) Code() int {
|
||||
}
|
||||
|
||||
func (o *GetLogEntryByUUIDNotFound) Error() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidNotFound ", 404)
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidNotFound", 404)
|
||||
}
|
||||
|
||||
func (o *GetLogEntryByUUIDNotFound) String() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidNotFound ", 404)
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUuidNotFound", 404)
|
||||
}
|
||||
|
||||
func (o *GetLogEntryByUUIDNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
|
||||
@ -234,11 +237,13 @@ func (o *GetLogEntryByUUIDDefault) Code() int {
|
||||
}
|
||||
|
||||
func (o *GetLogEntryByUUIDDefault) Error() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUUID default %+v", o._statusCode, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUUID default %s", o._statusCode, payload)
|
||||
}
|
||||
|
||||
func (o *GetLogEntryByUUIDDefault) String() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUUID default %+v", o._statusCode, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log/entries/{entryUUID}][%d] getLogEntryByUUID default %s", o._statusCode, payload)
|
||||
}
|
||||
|
||||
func (o *GetLogEntryByUUIDDefault) GetPayload() *models.Error {
|
||||
|
@ -22,6 +22,7 @@ package entries
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
@ -114,11 +115,13 @@ func (o *SearchLogQueryOK) Code() int {
|
||||
}
|
||||
|
||||
func (o *SearchLogQueryOK) Error() string {
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryOK %+v", 200, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryOK %s", 200, payload)
|
||||
}
|
||||
|
||||
func (o *SearchLogQueryOK) String() string {
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryOK %+v", 200, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryOK %s", 200, payload)
|
||||
}
|
||||
|
||||
func (o *SearchLogQueryOK) GetPayload() []models.LogEntry {
|
||||
@ -180,11 +183,13 @@ func (o *SearchLogQueryBadRequest) Code() int {
|
||||
}
|
||||
|
||||
func (o *SearchLogQueryBadRequest) Error() string {
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryBadRequest %+v", 400, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryBadRequest %s", 400, payload)
|
||||
}
|
||||
|
||||
func (o *SearchLogQueryBadRequest) String() string {
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryBadRequest %+v", 400, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryBadRequest %s", 400, payload)
|
||||
}
|
||||
|
||||
func (o *SearchLogQueryBadRequest) GetPayload() *models.Error {
|
||||
@ -248,11 +253,13 @@ func (o *SearchLogQueryUnprocessableEntity) Code() int {
|
||||
}
|
||||
|
||||
func (o *SearchLogQueryUnprocessableEntity) Error() string {
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %+v", 422, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %s", 422, payload)
|
||||
}
|
||||
|
||||
func (o *SearchLogQueryUnprocessableEntity) String() string {
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %+v", 422, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQueryUnprocessableEntity %s", 422, payload)
|
||||
}
|
||||
|
||||
func (o *SearchLogQueryUnprocessableEntity) GetPayload() *models.Error {
|
||||
@ -320,11 +327,13 @@ func (o *SearchLogQueryDefault) Code() int {
|
||||
}
|
||||
|
||||
func (o *SearchLogQueryDefault) Error() string {
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQuery default %+v", o._statusCode, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQuery default %s", o._statusCode, payload)
|
||||
}
|
||||
|
||||
func (o *SearchLogQueryDefault) String() string {
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQuery default %+v", o._statusCode, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/log/entries/retrieve][%d] searchLogQuery default %s", o._statusCode, payload)
|
||||
}
|
||||
|
||||
func (o *SearchLogQueryDefault) GetPayload() *models.Error {
|
||||
|
28
vendor/github.com/sigstore/rekor/pkg/generated/client/index/index_client.go
generated
vendored
@ -23,6 +23,7 @@ package index
|
||||
|
||||
import (
|
||||
"github.com/go-openapi/runtime"
|
||||
httptransport "github.com/go-openapi/runtime/client"
|
||||
"github.com/go-openapi/strfmt"
|
||||
)
|
||||
|
||||
@ -31,6 +32,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
|
||||
return &Client{transport: transport, formats: formats}
|
||||
}
|
||||
|
||||
// New creates a new index API client with basic auth credentials.
|
||||
// It takes the following parameters:
|
||||
// - host: http host (github.com).
|
||||
// - basePath: any base path for the API client ("/v1", "/v3").
|
||||
// - scheme: http scheme ("http", "https").
|
||||
// - user: user for basic authentication header.
|
||||
// - password: password for basic authentication header.
|
||||
func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
|
||||
transport := httptransport.New(host, basePath, []string{scheme})
|
||||
transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
|
||||
return &Client{transport: transport, formats: strfmt.Default}
|
||||
}
|
||||
|
||||
// New creates a new index API client with a bearer token for authentication.
|
||||
// It takes the following parameters:
|
||||
// - host: http host (github.com).
|
||||
// - basePath: any base path for the API client ("/v1", "/v3").
|
||||
// - scheme: http scheme ("http", "https").
|
||||
// - bearerToken: bearer token for Bearer authentication header.
|
||||
func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
|
||||
transport := httptransport.New(host, basePath, []string{scheme})
|
||||
transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
|
||||
return &Client{transport: transport, formats: strfmt.Default}
|
||||
}
|
||||
|
||||
/*
|
||||
Client for index API
|
||||
*/
|
||||
@ -39,7 +65,7 @@ type Client struct {
|
||||
formats strfmt.Registry
|
||||
}
|
||||
|
||||
// ClientOption is the option for Client methods
|
||||
// ClientOption may be used to customize the behavior of Client methods.
|
||||
type ClientOption func(*runtime.ClientOperation)
|
||||
|
||||
// ClientService is the interface for Client methods
|
||||
|
19
vendor/github.com/sigstore/rekor/pkg/generated/client/index/search_index_responses.go
generated
vendored
@ -22,6 +22,7 @@ package index
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
@ -108,11 +109,13 @@ func (o *SearchIndexOK) Code() int {
|
||||
}
|
||||
|
||||
func (o *SearchIndexOK) Error() string {
|
||||
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexOK %+v", 200, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexOK %s", 200, payload)
|
||||
}
|
||||
|
||||
func (o *SearchIndexOK) String() string {
|
||||
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexOK %+v", 200, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexOK %s", 200, payload)
|
||||
}
|
||||
|
||||
func (o *SearchIndexOK) GetPayload() []string {
|
||||
@ -174,11 +177,13 @@ func (o *SearchIndexBadRequest) Code() int {
|
||||
}
|
||||
|
||||
func (o *SearchIndexBadRequest) Error() string {
|
||||
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexBadRequest %+v", 400, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexBadRequest %s", 400, payload)
|
||||
}
|
||||
|
||||
func (o *SearchIndexBadRequest) String() string {
|
||||
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexBadRequest %+v", 400, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndexBadRequest %s", 400, payload)
|
||||
}
|
||||
|
||||
func (o *SearchIndexBadRequest) GetPayload() *models.Error {
|
||||
@ -246,11 +251,13 @@ func (o *SearchIndexDefault) Code() int {
|
||||
}
|
||||
|
||||
func (o *SearchIndexDefault) Error() string {
|
||||
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndex default %+v", o._statusCode, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndex default %s", o._statusCode, payload)
|
||||
}
|
||||
|
||||
func (o *SearchIndexDefault) String() string {
|
||||
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndex default %+v", o._statusCode, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[POST /api/v1/index/retrieve][%d] searchIndex default %s", o._statusCode, payload)
|
||||
}
|
||||
|
||||
func (o *SearchIndexDefault) GetPayload() *models.Error {
|
||||
|
13
vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/get_public_key_responses.go
generated
vendored
@ -22,6 +22,7 @@ package pubkey
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
@ -102,11 +103,13 @@ func (o *GetPublicKeyOK) Code() int {
|
||||
}
|
||||
|
||||
func (o *GetPublicKeyOK) Error() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKeyOK %+v", 200, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKeyOK %s", 200, payload)
|
||||
}
|
||||
|
||||
func (o *GetPublicKeyOK) String() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKeyOK %+v", 200, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKeyOK %s", 200, payload)
|
||||
}
|
||||
|
||||
func (o *GetPublicKeyOK) GetPayload() string {
|
||||
@ -172,11 +175,13 @@ func (o *GetPublicKeyDefault) Code() int {
|
||||
}
|
||||
|
||||
func (o *GetPublicKeyDefault) Error() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKey default %+v", o._statusCode, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKey default %s", o._statusCode, payload)
|
||||
}
|
||||
|
||||
func (o *GetPublicKeyDefault) String() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKey default %+v", o._statusCode, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log/publicKey][%d] getPublicKey default %s", o._statusCode, payload)
|
||||
}
|
||||
|
||||
func (o *GetPublicKeyDefault) GetPayload() *models.Error {
|
||||
|
52
vendor/github.com/sigstore/rekor/pkg/generated/client/pubkey/pubkey_client.go
generated
vendored
@ -23,6 +23,7 @@ package pubkey
|
||||
|
||||
import (
|
||||
"github.com/go-openapi/runtime"
|
||||
httptransport "github.com/go-openapi/runtime/client"
|
||||
"github.com/go-openapi/strfmt"
|
||||
)
|
||||
|
||||
@ -31,6 +32,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
|
||||
return &Client{transport: transport, formats: formats}
|
||||
}
|
||||
|
||||
// New creates a new pubkey API client with basic auth credentials.
|
||||
// It takes the following parameters:
|
||||
// - host: http host (github.com).
|
||||
// - basePath: any base path for the API client ("/v1", "/v3").
|
||||
// - scheme: http scheme ("http", "https").
|
||||
// - user: user for basic authentication header.
|
||||
// - password: password for basic authentication header.
|
||||
func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
|
||||
transport := httptransport.New(host, basePath, []string{scheme})
|
||||
transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
|
||||
return &Client{transport: transport, formats: strfmt.Default}
|
||||
}
|
||||
|
||||
// New creates a new pubkey API client with a bearer token for authentication.
|
||||
// It takes the following parameters:
|
||||
// - host: http host (github.com).
|
||||
// - basePath: any base path for the API client ("/v1", "/v3").
|
||||
// - scheme: http scheme ("http", "https").
|
||||
// - bearerToken: bearer token for Bearer authentication header.
|
||||
func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
|
||||
transport := httptransport.New(host, basePath, []string{scheme})
|
||||
transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
|
||||
return &Client{transport: transport, formats: strfmt.Default}
|
||||
}
|
||||
|
||||
/*
|
||||
Client for pubkey API
|
||||
*/
|
||||
@ -39,9 +65,33 @@ type Client struct {
|
||||
formats strfmt.Registry
|
||||
}
|
||||
|
||||
// ClientOption is the option for Client methods
|
||||
// ClientOption may be used to customize the behavior of Client methods.
|
||||
type ClientOption func(*runtime.ClientOperation)
|
||||
|
||||
// This client is generated with a few options you might find useful for your swagger spec.
|
||||
//
|
||||
// Feel free to add you own set of options.
|
||||
|
||||
// WithAccept allows the client to force the Accept header
|
||||
// to negotiate a specific Producer from the server.
|
||||
//
|
||||
// You may use this option to set arbitrary extensions to your MIME media type.
|
||||
func WithAccept(mime string) ClientOption {
|
||||
return func(r *runtime.ClientOperation) {
|
||||
r.ProducesMediaTypes = []string{mime}
|
||||
}
|
||||
}
|
||||
|
||||
// WithAcceptApplicationJSON sets the Accept header to "application/json".
|
||||
func WithAcceptApplicationJSON(r *runtime.ClientOperation) {
|
||||
r.ProducesMediaTypes = []string{"application/json"}
|
||||
}
|
||||
|
||||
// WithAcceptApplicationxPemFile sets the Accept header to "application/x-pem-file".
|
||||
func WithAcceptApplicationxPemFile(r *runtime.ClientOperation) {
|
||||
r.ProducesMediaTypes = []string{"application/x-pem-file"}
|
||||
}
|
||||
|
||||
// ClientService is the interface for Client methods
|
||||
type ClientService interface {
|
||||
GetPublicKey(params *GetPublicKeyParams, opts ...ClientOption) (*GetPublicKeyOK, error)
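Alongside the shared constructors, the pubkey client gains Accept-header helpers (WithAccept, WithAcceptApplicationJSON, WithAcceptApplicationxPemFile). A hedged sketch of asking for the log's public key as a PEM file; NewGetPublicKeyParams is the usual go-swagger parameter constructor and is assumed here, and the host is a placeholder:

```go
package main

import (
	"fmt"

	"github.com/sigstore/rekor/pkg/generated/client/pubkey"
)

func main() {
	svc := pubkey.NewClientWithBearerToken("rekor.example.org", "/", "https", "example-token")

	resp, err := svc.GetPublicKey(pubkey.NewGetPublicKeyParams(), pubkey.WithAcceptApplicationxPemFile)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println(resp.GetPayload())
}
```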
|
||||
|
13
vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_info_responses.go
generated
vendored
@ -22,6 +22,7 @@ package tlog
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
@ -102,11 +103,13 @@ func (o *GetLogInfoOK) Code() int {
|
||||
}
|
||||
|
||||
func (o *GetLogInfoOK) Error() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfoOK %+v", 200, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfoOK %s", 200, payload)
|
||||
}
|
||||
|
||||
func (o *GetLogInfoOK) String() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfoOK %+v", 200, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfoOK %s", 200, payload)
|
||||
}
|
||||
|
||||
func (o *GetLogInfoOK) GetPayload() *models.LogInfo {
|
||||
@ -174,11 +177,13 @@ func (o *GetLogInfoDefault) Code() int {
|
||||
}
|
||||
|
||||
func (o *GetLogInfoDefault) Error() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfo default %+v", o._statusCode, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfo default %s", o._statusCode, payload)
|
||||
}
|
||||
|
||||
func (o *GetLogInfoDefault) String() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfo default %+v", o._statusCode, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log][%d] getLogInfo default %s", o._statusCode, payload)
|
||||
}
|
||||
|
||||
func (o *GetLogInfoDefault) GetPayload() *models.Error {
|
||||
|
19
vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/get_log_proof_responses.go
generated
vendored
@ -22,6 +22,7 @@ package tlog
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
@ -108,11 +109,13 @@ func (o *GetLogProofOK) Code() int {
|
||||
}
|
||||
|
||||
func (o *GetLogProofOK) Error() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofOK %+v", 200, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofOK %s", 200, payload)
|
||||
}
|
||||
|
||||
func (o *GetLogProofOK) String() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofOK %+v", 200, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofOK %s", 200, payload)
|
||||
}
|
||||
|
||||
func (o *GetLogProofOK) GetPayload() *models.ConsistencyProof {
|
||||
@ -176,11 +179,13 @@ func (o *GetLogProofBadRequest) Code() int {
|
||||
}
|
||||
|
||||
func (o *GetLogProofBadRequest) Error() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofBadRequest %+v", 400, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofBadRequest %s", 400, payload)
|
||||
}
|
||||
|
||||
func (o *GetLogProofBadRequest) String() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofBadRequest %+v", 400, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProofBadRequest %s", 400, payload)
|
||||
}
|
||||
|
||||
func (o *GetLogProofBadRequest) GetPayload() *models.Error {
|
||||
@ -248,11 +253,13 @@ func (o *GetLogProofDefault) Code() int {
|
||||
}
|
||||
|
||||
func (o *GetLogProofDefault) Error() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProof default %+v", o._statusCode, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProof default %s", o._statusCode, payload)
|
||||
}
|
||||
|
||||
func (o *GetLogProofDefault) String() string {
|
||||
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProof default %+v", o._statusCode, o.Payload)
|
||||
payload, _ := json.Marshal(o.Payload)
|
||||
return fmt.Sprintf("[GET /api/v1/log/proof][%d] getLogProof default %s", o._statusCode, payload)
|
||||
}
|
||||
|
||||
func (o *GetLogProofDefault) GetPayload() *models.Error {
|
||||
|
28
vendor/github.com/sigstore/rekor/pkg/generated/client/tlog/tlog_client.go
generated
vendored
@ -23,6 +23,7 @@ package tlog
|
||||
|
||||
import (
|
||||
"github.com/go-openapi/runtime"
|
||||
httptransport "github.com/go-openapi/runtime/client"
|
||||
"github.com/go-openapi/strfmt"
|
||||
)
|
||||
|
||||
@ -31,6 +32,31 @@ func New(transport runtime.ClientTransport, formats strfmt.Registry) ClientServi
|
||||
return &Client{transport: transport, formats: formats}
|
||||
}
|
||||
|
||||
// New creates a new tlog API client with basic auth credentials.
|
||||
// It takes the following parameters:
|
||||
// - host: http host (github.com).
|
||||
// - basePath: any base path for the API client ("/v1", "/v3").
|
||||
// - scheme: http scheme ("http", "https").
|
||||
// - user: user for basic authentication header.
|
||||
// - password: password for basic authentication header.
|
||||
func NewClientWithBasicAuth(host, basePath, scheme, user, password string) ClientService {
|
||||
transport := httptransport.New(host, basePath, []string{scheme})
|
||||
transport.DefaultAuthentication = httptransport.BasicAuth(user, password)
|
||||
return &Client{transport: transport, formats: strfmt.Default}
|
||||
}
|
||||
|
||||
// New creates a new tlog API client with a bearer token for authentication.
|
||||
// It takes the following parameters:
|
||||
// - host: http host (github.com).
|
||||
// - basePath: any base path for the API client ("/v1", "/v3").
|
||||
// - scheme: http scheme ("http", "https").
|
||||
// - bearerToken: bearer token for Bearer authentication header.
|
||||
func NewClientWithBearerToken(host, basePath, scheme, bearerToken string) ClientService {
|
||||
transport := httptransport.New(host, basePath, []string{scheme})
|
||||
transport.DefaultAuthentication = httptransport.BearerToken(bearerToken)
|
||||
return &Client{transport: transport, formats: strfmt.Default}
|
||||
}
|
||||
|
||||
/*
|
||||
Client for tlog API
|
||||
*/
|
||||
@ -39,7 +65,7 @@ type Client struct {
|
||||
formats strfmt.Registry
|
||||
}
|
||||
|
||||
// ClientOption is the option for Client methods
|
||||
// ClientOption may be used to customize the behavior of Client methods.
|
||||
type ClientOption func(*runtime.ClientOperation)
|
||||
|
||||
// ClientService is the interface for Client methods
|
||||
|
2
vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go
generated
vendored
@ -294,7 +294,7 @@ type AlpineV001SchemaPackageHash struct {
|
||||
|
||||
// The hashing function used to compute the hash value
|
||||
// Required: true
|
||||
// Enum: [sha256]
|
||||
// Enum: ["sha256"]
|
||||
Algorithm *string `json:"algorithm"`
|
||||
|
||||
// The hash value for the package
|
||||
|
4
vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go
generated
vendored
@ -307,7 +307,7 @@ type CoseV001SchemaDataEnvelopeHash struct {
|
||||
|
||||
// The hashing function used to compute the hash value
|
||||
// Required: true
|
||||
// Enum: [sha256]
|
||||
// Enum: ["sha256"]
|
||||
Algorithm *string `json:"algorithm"`
|
||||
|
||||
// The hash value for the envelope
|
||||
@ -417,7 +417,7 @@ type CoseV001SchemaDataPayloadHash struct {
|
||||
|
||||
// The hashing function used to compute the hash value
|
||||
// Required: true
|
||||
// Enum: [sha256]
|
||||
// Enum: ["sha256"]
|
||||
Algorithm *string `json:"algorithm"`
|
||||
|
||||
// The hash value for the content
|
||||
|
4
vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go
generated
vendored
@ -312,7 +312,7 @@ type DSSEV001SchemaEnvelopeHash struct {
|
||||
|
||||
// The hashing function used to compute the hash value
|
||||
// Required: true
|
||||
// Enum: [sha256]
|
||||
// Enum: ["sha256"]
|
||||
Algorithm *string `json:"algorithm"`
|
||||
|
||||
// The value of the computed digest over the entire envelope
|
||||
@ -422,7 +422,7 @@ type DSSEV001SchemaPayloadHash struct {
|
||||
|
||||
// The hashing function used to compute the hash value
|
||||
// Required: true
|
||||
// Enum: [sha256]
|
||||
// Enum: ["sha256"]
|
||||
Algorithm *string `json:"algorithm"`
|
||||
|
||||
// The value of the computed digest over the payload within the envelope
|
||||
|
4
vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_schema.go
generated
vendored
@ -21,9 +21,9 @@ package models
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
// HashedrekordSchema Rekor Schema
|
||||
// HashedrekordSchema Hashedrekord Schema
|
||||
//
|
||||
// # Schema for Rekord objects
|
||||
// # Schema for Hashedrekord objects
|
||||
//
|
||||
// swagger:model hashedrekordSchema
|
||||
type HashedrekordSchema interface{}
|
||||
|
2
vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go
generated
vendored
@ -277,7 +277,7 @@ type HashedrekordV001SchemaDataHash struct {
|
||||
|
||||
// The hashing function used to compute the hash value
|
||||
// Required: true
|
||||
// Enum: [sha256 sha384 sha512]
|
||||
// Enum: ["sha256","sha384","sha512"]
|
||||
Algorithm *string `json:"algorithm"`
|
||||
|
||||
// The hash value for the content, as represented by a lower case hexadecimal string
|
||||
|
2
vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go
generated
vendored
@ -326,7 +326,7 @@ type HelmV001SchemaChartHash struct {
|
||||
|
||||
// The hashing function used to compute the hash value
|
||||
// Required: true
|
||||
// Enum: [sha256]
|
||||
// Enum: ["sha256"]
|
||||
Algorithm *string `json:"algorithm"`
|
||||
|
||||
// The hash value for the chart
|
||||
|
4
vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go
generated
vendored
@ -300,7 +300,7 @@ type IntotoV001SchemaContentHash struct {
|
||||
|
||||
// The hashing function used to compute the hash value
|
||||
// Required: true
|
||||
// Enum: [sha256]
|
||||
// Enum: ["sha256"]
|
||||
Algorithm *string `json:"algorithm"`
|
||||
|
||||
// The hash value for the archive
|
||||
@ -410,7 +410,7 @@ type IntotoV001SchemaContentPayloadHash struct {
|
||||
|
||||
// The hashing function used to compute the hash value
|
||||
// Required: true
|
||||
// Enum: [sha256]
|
||||
// Enum: ["sha256"]
|
||||
Algorithm *string `json:"algorithm"`
|
||||
|
||||
// The hash value for the envelope's payload
|
||||
|
4
vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go
generated
vendored
@ -543,7 +543,7 @@ type IntotoV002SchemaContentHash struct {
|
||||
|
||||
// The hashing function used to compute the hash value
|
||||
// Required: true
|
||||
// Enum: [sha256]
|
||||
// Enum: ["sha256"]
|
||||
Algorithm *string `json:"algorithm"`
|
||||
|
||||
// The hash value for the archive
|
||||
@ -653,7 +653,7 @@ type IntotoV002SchemaContentPayloadHash struct {
|
||||
|
||||
// The hashing function used to compute the hash value
|
||||
// Required: true
|
||||
// Enum: [sha256]
|
||||
// Enum: ["sha256"]
|
||||
Algorithm *string `json:"algorithm"`
|
||||
|
||||
// The hash value of the payload
|
||||
|
2
vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go
generated
vendored
@ -283,7 +283,7 @@ type JarV001SchemaArchiveHash struct {
|
||||
|
||||
// The hashing function used to compute the hash value
|
||||
// Required: true
|
||||
// Enum: [sha256]
|
||||
// Enum: ["sha256"]
|
||||
Algorithm *string `json:"algorithm"`
|
||||
|
||||
// The hash value for the archive
|
||||
|
4
vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go
generated
vendored
@ -281,7 +281,7 @@ type RekordV001SchemaDataHash struct {
|
||||
|
||||
// The hashing function used to compute the hash value
|
||||
// Required: true
|
||||
// Enum: [sha256]
|
||||
// Enum: ["sha256"]
|
||||
Algorithm *string `json:"algorithm"`
|
||||
|
||||
// The hash value for the content
|
||||
@ -396,7 +396,7 @@ type RekordV001SchemaSignature struct {
|
||||
|
||||
// Specifies the format of the signature
|
||||
// Required: true
|
||||
// Enum: [pgp minisign x509 ssh]
|
||||
// Enum: ["pgp","minisign","x509","ssh"]
|
||||
Format *string `json:"format"`
|
||||
|
||||
// public key
|
||||
|
2
vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go
generated
vendored
@ -294,7 +294,7 @@ type RpmV001SchemaPackageHash struct {
|
||||
|
||||
// The hashing function used to compute the hash value
|
||||
// Required: true
|
||||
// Enum: [sha256]
|
||||
// Enum: ["sha256"]
|
||||
Algorithm *string `json:"algorithm"`
|
||||
|
||||
// The hash value for the package
|
||||
|
4
vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go
generated
vendored
@ -45,7 +45,7 @@ type SearchIndex struct {
|
||||
Hash string `json:"hash,omitempty"`
|
||||
|
||||
// operator
|
||||
// Enum: [and or]
|
||||
// Enum: ["and","or"]
|
||||
Operator string `json:"operator,omitempty"`
|
||||
|
||||
// public key
|
||||
@ -227,7 +227,7 @@ type SearchIndexPublicKey struct {
|
||||
|
||||
// format
|
||||
// Required: true
|
||||
// Enum: [pgp x509 minisign ssh tuf]
|
||||
// Enum: ["pgp","x509","minisign","ssh","tuf"]
|
||||
Format *string `json:"format"`
|
||||
|
||||
// url
|
||||
|
33
vendor/github.com/sigstore/rekor/pkg/util/signed_note.go
generated
vendored
@ -18,6 +18,7 @@ package util
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/rsa"
|
||||
@ -53,16 +54,14 @@ func (s *SignedNote) Sign(identity string, signer signature.Signer, opts signatu
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("retrieving public key: %w", err)
|
||||
}
|
||||
pubKeyBytes, err := x509.MarshalPKIXPublicKey(pk)
|
||||
pkHash, err := getPublicKeyHash(pk)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("marshalling public key: %w", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pkSha := sha256.Sum256(pubKeyBytes)
|
||||
|
||||
signature := note.Signature{
|
||||
Name: identity,
|
||||
Hash: binary.BigEndian.Uint32(pkSha[:]),
|
||||
Hash: pkHash,
|
||||
Base64: base64.StdEncoding.EncodeToString(sig),
|
||||
}
|
||||
|
||||
@ -80,15 +79,25 @@ func (s SignedNote) Verify(verifier signature.Verifier) bool {
|
||||
msg := []byte(s.Note)
|
||||
digest := sha256.Sum256(msg)
|
||||
|
||||
pk, err := verifier.PublicKey()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
verifierPkHash, err := getPublicKeyHash(pk)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, s := range s.Signatures {
|
||||
sigBytes, err := base64.StdEncoding.DecodeString(s.Base64)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
pk, err := verifier.PublicKey()
|
||||
if err != nil {
|
||||
|
||||
if s.Hash != verifierPkHash {
|
||||
return false
|
||||
}
|
||||
|
||||
opts := []signature.VerifyOption{}
|
||||
switch pk.(type) {
|
||||
case *rsa.PublicKey, *ecdsa.PublicKey:
|
||||
@ -190,3 +199,13 @@ func SignedNoteValidator(strToValidate string) bool {
|
||||
s := SignedNote{}
|
||||
return s.UnmarshalText([]byte(strToValidate)) == nil
|
||||
}
|
||||
|
||||
func getPublicKeyHash(publicKey crypto.PublicKey) (uint32, error) {
|
||||
pubKeyBytes, err := x509.MarshalPKIXPublicKey(publicKey)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("marshalling public key: %w", err)
|
||||
}
|
||||
pkSha := sha256.Sum256(pubKeyBytes)
|
||||
hash := binary.BigEndian.Uint32(pkSha[:])
|
||||
return hash, nil
|
||||
}
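getPublicKeyHash centralizes the key-hash computation that Sign and Verify now share: SHA-256 over the PKIX encoding of the key, truncated to a big-endian uint32. A stand-alone restatement so the value can be checked against a locally generated key; only standard-library calls are used:

```go
package main

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"crypto/x509"
	"encoding/binary"
	"fmt"
)

// keyHash mirrors getPublicKeyHash above.
func keyHash(pub crypto.PublicKey) (uint32, error) {
	der, err := x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		return 0, fmt.Errorf("marshalling public key: %w", err)
	}
	sum := sha256.Sum256(der)
	return binary.BigEndian.Uint32(sum[:]), nil
}

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	h, err := keyHash(&priv.PublicKey)
	if err != nil {
		panic(err)
	}
	fmt.Printf("key hash: %08x\n", h)
}
```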
|
||||
|
2
vendor/github.com/sigstore/sigstore/pkg/oauthflow/interactive.go
generated
vendored
@ -134,7 +134,7 @@ func (i *InteractiveIDTokenGetter) doOobFlow(cfg *oauth2.Config, stateToken stri
fmt.Fprintln(i.GetOutput(), "Go to the following link in a browser:\n\n\t", authURL)
fmt.Fprintf(i.GetOutput(), "Enter verification code: ")
var code string
fmt.Fscanf(i.GetInput(), "%s", &code)
_, _ = fmt.Fscanf(i.GetInput(), "%s", &code)
// New line in case read input doesn't move cursor to next line.
fmt.Fprintln(i.GetOutput())
return code
12
vendor/github.com/sylabs/sif/v2/pkg/sif/create.go
generated
vendored
@ -23,21 +23,19 @@ var errAlignmentOverflow = errors.New("integer overflow when calculating alignme

// nextAligned finds the next offset that satisfies alignment.
func nextAligned(offset int64, alignment int) (int64, error) {
align64 := uint64(alignment)
offset64 := uint64(offset)
align64 := int64(alignment)

if align64 <= 0 || offset64%align64 == 0 {
if align64 <= 0 || offset%align64 == 0 {
return offset, nil
}

offset64 += (align64 - offset64%align64)
align64 -= offset % align64

if offset64 > math.MaxInt64 {
if (math.MaxInt64 - offset) < align64 {
return 0, errAlignmentOverflow
}

//nolint:gosec // Overflow handled above.
return int64(offset64), nil
return offset + align64, nil
}

// writeDataObjectAt writes the data object described by di to ws, using time t, recording details
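The rewritten nextAligned stays in signed 64-bit arithmetic and checks the remaining headroom against math.MaxInt64 before adding the padding, instead of round-tripping through uint64. A self-contained restatement with a couple of worked values, including the overflow case:

```go
package main

import (
	"errors"
	"fmt"
	"math"
)

var errAlignmentOverflow = errors.New("integer overflow when calculating alignment")

// nextAligned restates the rewritten function above so the overflow guard
// can be exercised with ordinary test values.
func nextAligned(offset int64, alignment int) (int64, error) {
	align64 := int64(alignment)
	if align64 <= 0 || offset%align64 == 0 {
		return offset, nil
	}
	align64 -= offset % align64 // padding still needed
	if (math.MaxInt64 - offset) < align64 {
		return 0, errAlignmentOverflow
	}
	return offset + align64, nil
}

func main() {
	fmt.Println(nextAligned(10, 8))                 // 16 <nil>
	fmt.Println(nextAligned(16, 8))                 // 16 <nil>
	fmt.Println(nextAligned(math.MaxInt64-1, 4096)) // 0 integer overflow when calculating alignment
}
```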
11 vendor/github.com/vbauerster/mpb/v8/container_option.go (generated, vendored)

@@ -30,6 +30,17 @@ func WithWidth(width int) ContainerOption {
}
}

// WithQueueLen sets buffer size of heap manager channel. Ideally it must be
// kept at MAX value, where MAX is number of bars to be rendered at the same
// time. If len < MAX then backpressure to the scheduler will be increased as
// MAX-len extra goroutines will be launched at each render cycle.
// Default queue len is 128.
func WithQueueLen(len int) ContainerOption {
return func(s *pState) {
s.hmQueueLen = len
}
}

// WithRefreshRate overrides default 150ms refresh rate.
func WithRefreshRate(d time.Duration) ContainerOption {
return func(s *pState) {
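Per the new option's doc comment above, WithQueueLen sizes the heap-manager channel and should roughly match the number of bars rendered concurrently (128 by default). A hedged usage sketch; the bar count and delays are arbitrary:

package main

import (
	"time"

	"github.com/vbauerster/mpb/v8"
)

func main() {
	const bars = 3
	// Size the heap-manager queue to the number of concurrent bars, per the
	// WithQueueLen documentation; 128 remains the default if omitted.
	p := mpb.New(
		mpb.WithQueueLen(bars),
		mpb.WithRefreshRate(120*time.Millisecond),
	)
	for i := 0; i < bars; i++ {
		b := p.AddBar(100)
		go func() {
			for j := 0; j < 100; j++ {
				b.Increment()
				time.Sleep(5 * time.Millisecond)
			}
		}()
	}
	p.Wait()
}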
56 vendor/github.com/vbauerster/mpb/v8/heap_manager.go (generated, vendored)
@ -10,7 +10,6 @@ const (
|
||||
h_sync heapCmd = iota
|
||||
h_push
|
||||
h_iter
|
||||
h_drain
|
||||
h_fix
|
||||
h_state
|
||||
h_end
|
||||
@ -22,8 +21,9 @@ type heapRequest struct {
|
||||
}
|
||||
|
||||
type iterData struct {
|
||||
iter chan<- *Bar
|
||||
drop <-chan struct{}
|
||||
drop <-chan struct{}
|
||||
iter chan<- *Bar
|
||||
iterPop chan<- *Bar
|
||||
}
|
||||
|
||||
type pushData struct {
|
||||
@ -41,7 +41,7 @@ func (m heapManager) run() {
|
||||
var bHeap priorityQueue
|
||||
var pMatrix, aMatrix map[int][]chan int
|
||||
|
||||
var l int
|
||||
var len int
|
||||
var sync bool
|
||||
|
||||
for req := range m {
|
||||
@ -49,11 +49,9 @@ func (m heapManager) run() {
|
||||
case h_push:
|
||||
data := req.data.(pushData)
|
||||
heap.Push(&bHeap, data.bar)
|
||||
if !sync {
|
||||
sync = data.sync
|
||||
}
|
||||
sync = sync || data.sync
|
||||
case h_sync:
|
||||
if sync || l != bHeap.Len() {
|
||||
if sync || len != bHeap.Len() {
|
||||
pMatrix = make(map[int][]chan int)
|
||||
aMatrix = make(map[int][]chan int)
|
||||
for _, b := range bHeap {
|
||||
@ -66,33 +64,37 @@ func (m heapManager) run() {
|
||||
}
|
||||
}
|
||||
sync = false
|
||||
l = bHeap.Len()
|
||||
len = bHeap.Len()
|
||||
}
|
||||
drop := req.data.(<-chan struct{})
|
||||
syncWidth(pMatrix, drop)
|
||||
syncWidth(aMatrix, drop)
|
||||
case h_iter:
|
||||
data := req.data.(iterData)
|
||||
drop_iter:
|
||||
loop: // unordered iteration
|
||||
for _, b := range bHeap {
|
||||
select {
|
||||
case data.iter <- b:
|
||||
case <-data.drop:
|
||||
break drop_iter
|
||||
data.iterPop = nil
|
||||
break loop
|
||||
}
|
||||
}
|
||||
close(data.iter)
|
||||
case h_drain:
|
||||
data := req.data.(iterData)
|
||||
drop_drain:
|
||||
if data.iterPop == nil {
|
||||
break
|
||||
}
|
||||
loop_pop: // ordered iteration
|
||||
for bHeap.Len() != 0 {
|
||||
bar := heap.Pop(&bHeap).(*Bar)
|
||||
select {
|
||||
case data.iter <- heap.Pop(&bHeap).(*Bar):
|
||||
case data.iterPop <- bar:
|
||||
case <-data.drop:
|
||||
break drop_drain
|
||||
heap.Push(&bHeap, bar)
|
||||
break loop_pop
|
||||
}
|
||||
}
|
||||
close(data.iter)
|
||||
close(data.iterPop)
|
||||
case h_fix:
|
||||
data := req.data.(fixData)
|
||||
if data.bar.index < 0 {
|
||||
@ -104,7 +106,7 @@ func (m heapManager) run() {
|
||||
}
|
||||
case h_state:
|
||||
ch := req.data.(chan<- bool)
|
||||
ch <- sync || l != bHeap.Len()
|
||||
ch <- sync || len != bHeap.Len()
|
||||
case h_end:
|
||||
ch := req.data.(chan<- interface{})
|
||||
if ch != nil {
|
||||
@ -123,19 +125,21 @@ func (m heapManager) sync(drop <-chan struct{}) {
|
||||
|
||||
func (m heapManager) push(b *Bar, sync bool) {
|
||||
data := pushData{b, sync}
|
||||
m <- heapRequest{cmd: h_push, data: data}
|
||||
req := heapRequest{cmd: h_push, data: data}
|
||||
select {
|
||||
case m <- req:
|
||||
default:
|
||||
go func() {
|
||||
m <- req
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func (m heapManager) iter(iter chan<- *Bar, drop <-chan struct{}) {
|
||||
data := iterData{iter, drop}
|
||||
func (m heapManager) iter(drop <-chan struct{}, iter, iterPop chan<- *Bar) {
|
||||
data := iterData{drop, iter, iterPop}
|
||||
m <- heapRequest{cmd: h_iter, data: data}
|
||||
}
|
||||
|
||||
func (m heapManager) drain(iter chan<- *Bar, drop <-chan struct{}) {
|
||||
data := iterData{iter, drop}
|
||||
m <- heapRequest{cmd: h_drain, data: data}
|
||||
}
|
||||
|
||||
func (m heapManager) fix(b *Bar, priority int, lazy bool) {
|
||||
data := fixData{b, priority, lazy}
|
||||
m <- heapRequest{cmd: h_fix, data: data}
|
||||
|
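Worth noting in the heap_manager.go hunk above: heapManager.push now attempts a non-blocking send into the (newly buffered) channel and only spawns a goroutine when the buffer is full, which is why the WithQueueLen buffer size matters. A generic sketch of that send-or-spawn pattern, independent of mpb's types:

package main

import (
	"fmt"
	"sync"
)

// sendOrSpawn tries a non-blocking send first; if the buffer is full it falls
// back to a goroutine so the caller is never blocked (mirroring the pattern
// used by the vendored heapManager.push).
func sendOrSpawn(ch chan int, v int, wg *sync.WaitGroup) {
	select {
	case ch <- v:
	default:
		wg.Add(1)
		go func() {
			defer wg.Done()
			ch <- v
		}()
	}
}

func main() {
	var wg sync.WaitGroup
	ch := make(chan int, 2) // small buffer: the third send takes the goroutine path
	for i := 1; i <= 3; i++ {
		sendOrSpawn(ch, i, &wg)
	}
	for i := 0; i < 3; i++ {
		fmt.Println(<-ch)
	}
	wg.Wait()
	close(ch)
}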
64 vendor/github.com/vbauerster/mpb/v8/progress.go (generated, vendored)
@ -15,6 +15,7 @@ import (
|
||||
)
|
||||
|
||||
const defaultRefreshRate = 150 * time.Millisecond
|
||||
const defaultHmQueueLength = 128
|
||||
|
||||
// DoneError represents use after `(*Progress).Wait()` error.
|
||||
var DoneError = fmt.Errorf("%T instance can't be reused after %[1]T.Wait()", (*Progress)(nil))
|
||||
@ -31,16 +32,17 @@ type Progress struct {
|
||||
|
||||
// pState holds bars in its priorityQueue, it gets passed to (*Progress).serve monitor goroutine.
|
||||
type pState struct {
|
||||
ctx context.Context
|
||||
hm heapManager
|
||||
dropS, dropD chan struct{}
|
||||
renderReq chan time.Time
|
||||
idCount int
|
||||
popPriority int
|
||||
ctx context.Context
|
||||
hm heapManager
|
||||
iterDrop chan struct{}
|
||||
renderReq chan time.Time
|
||||
idCount int
|
||||
popPriority int
|
||||
|
||||
// following are provided/overrided by user
|
||||
refreshRate time.Duration
|
||||
hmQueueLen int
|
||||
reqWidth int
|
||||
refreshRate time.Duration
|
||||
popCompleted bool
|
||||
autoRefresh bool
|
||||
delayRC <-chan struct{}
|
||||
@ -68,9 +70,8 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
s := &pState{
|
||||
ctx: ctx,
|
||||
hm: make(heapManager),
|
||||
dropS: make(chan struct{}),
|
||||
dropD: make(chan struct{}),
|
||||
hmQueueLen: defaultHmQueueLength,
|
||||
iterDrop: make(chan struct{}),
|
||||
renderReq: make(chan time.Time),
|
||||
popPriority: math.MinInt32,
|
||||
refreshRate: defaultRefreshRate,
|
||||
@ -85,6 +86,8 @@ func NewWithContext(ctx context.Context, options ...ContainerOption) *Progress {
|
||||
}
|
||||
}
|
||||
|
||||
s.hm = make(heapManager, s.hmQueueLen)
|
||||
|
||||
p := &Progress{
|
||||
uwg: s.uwg,
|
||||
operateState: make(chan func(*pState)),
|
||||
@ -173,9 +176,9 @@ func (p *Progress) Add(total int64, filler BarFiller, options ...BarOption) (*Ba
|
||||
}
|
||||
|
||||
func (p *Progress) traverseBars(cb func(b *Bar) bool) {
|
||||
iter, drop := make(chan *Bar), make(chan struct{})
|
||||
drop, iter := make(chan struct{}), make(chan *Bar)
|
||||
select {
|
||||
case p.operateState <- func(s *pState) { s.hm.iter(iter, drop) }:
|
||||
case p.operateState <- func(s *pState) { s.hm.iter(drop, iter, nil) }:
|
||||
for b := range iter {
|
||||
if !cb(b) {
|
||||
close(drop)
|
||||
@ -333,15 +336,15 @@ func (s *pState) manualRefreshListener(done chan struct{}) {
|
||||
}
|
||||
|
||||
func (s *pState) render(cw *cwriter.Writer) (err error) {
|
||||
s.hm.sync(s.dropS)
|
||||
iter := make(chan *Bar)
|
||||
go s.hm.iter(iter, s.dropS)
|
||||
iter, iterPop := make(chan *Bar), make(chan *Bar)
|
||||
s.hm.sync(s.iterDrop)
|
||||
s.hm.iter(s.iterDrop, iter, iterPop)
|
||||
|
||||
var width, height int
|
||||
if cw.IsTerminal() {
|
||||
width, height, err = cw.GetTermSize()
|
||||
if err != nil {
|
||||
close(s.dropS)
|
||||
close(s.iterDrop)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
@ -357,23 +360,17 @@ func (s *pState) render(cw *cwriter.Writer) (err error) {
|
||||
go b.render(width)
|
||||
}
|
||||
|
||||
return s.flush(cw, height)
|
||||
return s.flush(cw, height, iterPop)
|
||||
}
|
||||
|
||||
func (s *pState) flush(cw *cwriter.Writer, height int) error {
|
||||
var wg sync.WaitGroup
|
||||
defer wg.Wait() // waiting for all s.push to complete
|
||||
|
||||
func (s *pState) flush(cw *cwriter.Writer, height int, iter <-chan *Bar) error {
|
||||
var popCount int
|
||||
var rows []io.Reader
|
||||
|
||||
iter := make(chan *Bar)
|
||||
s.hm.drain(iter, s.dropD)
|
||||
|
||||
for b := range iter {
|
||||
frame := <-b.frameCh
|
||||
if frame.err != nil {
|
||||
close(s.dropD)
|
||||
close(s.iterDrop)
|
||||
b.cancel()
|
||||
return frame.err // b.frameCh is buffered it's ok to return here
|
||||
}
|
||||
@ -393,16 +390,13 @@ func (s *pState) flush(cw *cwriter.Writer, height int) error {
|
||||
if qb, ok := s.queueBars[b]; ok {
|
||||
delete(s.queueBars, b)
|
||||
qb.priority = b.priority
|
||||
wg.Add(1)
|
||||
go s.push(&wg, qb, true)
|
||||
s.hm.push(qb, true)
|
||||
} else if s.popCompleted && !frame.noPop {
|
||||
b.priority = s.popPriority
|
||||
s.popPriority++
|
||||
wg.Add(1)
|
||||
go s.push(&wg, b, false)
|
||||
s.hm.push(b, false)
|
||||
} else if !frame.rmOnComplete {
|
||||
wg.Add(1)
|
||||
go s.push(&wg, b, false)
|
||||
s.hm.push(b, false)
|
||||
}
|
||||
case 2:
|
||||
if s.popCompleted && !frame.noPop {
|
||||
@ -411,8 +405,7 @@ func (s *pState) flush(cw *cwriter.Writer, height int) error {
|
||||
}
|
||||
fallthrough
|
||||
default:
|
||||
wg.Add(1)
|
||||
go s.push(&wg, b, false)
|
||||
s.hm.push(b, false)
|
||||
}
|
||||
}
|
||||
|
||||
@ -426,11 +419,6 @@ func (s *pState) flush(cw *cwriter.Writer, height int) error {
|
||||
return cw.Flush(len(rows) - popCount)
|
||||
}
|
||||
|
||||
func (s *pState) push(wg *sync.WaitGroup, b *Bar, sync bool) {
|
||||
s.hm.push(b, sync)
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
func (s pState) makeBarState(total int64, filler BarFiller, options ...BarOption) *bState {
|
||||
bs := &bState{
|
||||
id: s.idCount,
|
||||
|
7 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go (generated, vendored)

@@ -18,13 +18,6 @@ const (
WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded)
)

// Server HTTP metrics.
const (
serverRequestSize = "http.server.request.size" // Incoming request bytes total
serverResponseSize = "http.server.response.size" // Incoming response bytes total
serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
)

// Client HTTP metrics.
const (
clientRequestSize = "http.client.request.size" // Outgoing request bytes total
15 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go (generated, vendored)

@@ -8,6 +8,8 @@ import (
"net/http"
"net/http/httptrace"

"go.opentelemetry.io/otel/attribute"

"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/propagation"
@@ -33,8 +35,9 @@ type config struct {
SpanNameFormatter func(string, *http.Request) string
ClientTrace func(context.Context) *httptrace.ClientTrace

TracerProvider trace.TracerProvider
MeterProvider metric.MeterProvider
TracerProvider trace.TracerProvider
MeterProvider metric.MeterProvider
MetricAttributesFn func(*http.Request) []attribute.KeyValue
}

// Option interface used for setting optional config properties.
@@ -194,3 +197,11 @@ func WithServerName(server string) Option {
c.ServerName = server
})
}

// WithMetricAttributesFn returns an Option to set a function that maps an HTTP request to a slice of attribute.KeyValue.
// These attributes will be included in metrics for every request.
func WithMetricAttributesFn(metricAttributesFn func(r *http.Request) []attribute.KeyValue) Option {
return optionFunc(func(c *config) {
c.MetricAttributesFn = metricAttributesFn
})
}
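The new WithMetricAttributesFn option above lets callers attach per-request attributes to the metrics recorded by the instrumented client transport. A hedged wiring sketch; the tenant-header attribute is purely illustrative:

package main

import (
	"fmt"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

func main() {
	transport := otelhttp.NewTransport(
		http.DefaultTransport,
		// Hypothetical example: tag client metrics with a tenant taken from a
		// request header, via the new MetricAttributesFn hook.
		otelhttp.WithMetricAttributesFn(func(r *http.Request) []attribute.KeyValue {
			return []attribute.KeyValue{
				attribute.String("tenant", r.Header.Get("X-Tenant")),
			}
		}),
	)
	client := &http.Client{Transport: transport}
	resp, err := client.Get("https://example.com")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}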
93 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go (generated, vendored)
@ -9,11 +9,9 @@ import (
|
||||
|
||||
"github.com/felixge/httpsnoop"
|
||||
|
||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
|
||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
|
||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/propagation"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
@ -24,7 +22,6 @@ type middleware struct {
|
||||
server string
|
||||
|
||||
tracer trace.Tracer
|
||||
meter metric.Meter
|
||||
propagators propagation.TextMapPropagator
|
||||
spanStartOptions []trace.SpanStartOption
|
||||
readEvent bool
|
||||
@ -34,10 +31,7 @@ type middleware struct {
|
||||
publicEndpoint bool
|
||||
publicEndpointFn func(*http.Request) bool
|
||||
|
||||
traceSemconv semconv.HTTPServer
|
||||
requestBytesCounter metric.Int64Counter
|
||||
responseBytesCounter metric.Int64Counter
|
||||
serverLatencyMeasure metric.Float64Histogram
|
||||
semconv semconv.HTTPServer
|
||||
}
|
||||
|
||||
func defaultHandlerFormatter(operation string, _ *http.Request) string {
|
||||
@ -56,8 +50,6 @@ func NewHandler(handler http.Handler, operation string, opts ...Option) http.Han
|
||||
func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Handler {
|
||||
h := middleware{
|
||||
operation: operation,
|
||||
|
||||
traceSemconv: semconv.NewHTTPServer(),
|
||||
}
|
||||
|
||||
defaultOpts := []Option{
|
||||
@ -67,7 +59,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han
|
||||
|
||||
c := newConfig(append(defaultOpts, opts...)...)
|
||||
h.configure(c)
|
||||
h.createMeasures()
|
||||
|
||||
return func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
@ -78,7 +69,6 @@ func NewMiddleware(operation string, opts ...Option) func(http.Handler) http.Han
|
||||
|
||||
func (h *middleware) configure(c *config) {
|
||||
h.tracer = c.Tracer
|
||||
h.meter = c.Meter
|
||||
h.propagators = c.Propagators
|
||||
h.spanStartOptions = c.SpanStartOptions
|
||||
h.readEvent = c.ReadEvent
|
||||
@ -88,6 +78,7 @@ func (h *middleware) configure(c *config) {
|
||||
h.publicEndpoint = c.PublicEndpoint
|
||||
h.publicEndpointFn = c.PublicEndpointFn
|
||||
h.server = c.ServerName
|
||||
h.semconv = semconv.NewHTTPServer(c.Meter)
|
||||
}
|
||||
|
||||
func handleErr(err error) {
|
||||
@ -96,30 +87,6 @@ func handleErr(err error) {
|
||||
}
|
||||
}
|
||||
|
||||
func (h *middleware) createMeasures() {
|
||||
var err error
|
||||
h.requestBytesCounter, err = h.meter.Int64Counter(
|
||||
serverRequestSize,
|
||||
metric.WithUnit("By"),
|
||||
metric.WithDescription("Measures the size of HTTP request messages."),
|
||||
)
|
||||
handleErr(err)
|
||||
|
||||
h.responseBytesCounter, err = h.meter.Int64Counter(
|
||||
serverResponseSize,
|
||||
metric.WithUnit("By"),
|
||||
metric.WithDescription("Measures the size of HTTP response messages."),
|
||||
)
|
||||
handleErr(err)
|
||||
|
||||
h.serverLatencyMeasure, err = h.meter.Float64Histogram(
|
||||
serverDuration,
|
||||
metric.WithUnit("ms"),
|
||||
metric.WithDescription("Measures the duration of inbound HTTP requests."),
|
||||
)
|
||||
handleErr(err)
|
||||
}
|
||||
|
||||
// serveHTTP sets up tracing and calls the given next http.Handler with the span
|
||||
// context injected into the request context.
|
||||
func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http.Handler) {
|
||||
@ -134,7 +101,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
|
||||
|
||||
ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
|
||||
opts := []trace.SpanStartOption{
|
||||
trace.WithAttributes(h.traceSemconv.RequestTraceAttrs(h.server, r)...),
|
||||
trace.WithAttributes(h.semconv.RequestTraceAttrs(h.server, r)...),
|
||||
}
|
||||
|
||||
opts = append(opts, h.spanStartOptions...)
|
||||
@ -166,14 +133,12 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
|
||||
}
|
||||
}
|
||||
|
||||
var bw bodyWrapper
|
||||
// if request body is nil or NoBody, we don't want to mutate the body as it
|
||||
// will affect the identity of it in an unforeseeable way because we assert
|
||||
// ReadCloser fulfills a certain interface and it is indeed nil or NoBody.
|
||||
bw := request.NewBodyWrapper(r.Body, readRecordFunc)
|
||||
if r.Body != nil && r.Body != http.NoBody {
|
||||
bw.ReadCloser = r.Body
|
||||
bw.record = readRecordFunc
|
||||
r.Body = &bw
|
||||
r.Body = bw
|
||||
}
|
||||
|
||||
writeRecordFunc := func(int64) {}
|
||||
@ -183,13 +148,7 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
|
||||
}
|
||||
}
|
||||
|
||||
rww := &respWriterWrapper{
|
||||
ResponseWriter: w,
|
||||
record: writeRecordFunc,
|
||||
ctx: ctx,
|
||||
props: h.propagators,
|
||||
statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
|
||||
}
|
||||
rww := request.NewRespWriterWrapper(w, writeRecordFunc)
|
||||
|
||||
// Wrap w to use our ResponseWriter methods while also exposing
|
||||
// other interfaces that w may implement (http.CloseNotifier,
|
||||
@ -217,35 +176,35 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http
|
||||
|
||||
next.ServeHTTP(w, r.WithContext(ctx))
|
||||
|
||||
span.SetStatus(semconv.ServerStatus(rww.statusCode))
|
||||
span.SetAttributes(h.traceSemconv.ResponseTraceAttrs(semconv.ResponseTelemetry{
|
||||
StatusCode: rww.statusCode,
|
||||
ReadBytes: bw.read.Load(),
|
||||
ReadError: bw.err,
|
||||
WriteBytes: rww.written,
|
||||
WriteError: rww.err,
|
||||
statusCode := rww.StatusCode()
|
||||
bytesWritten := rww.BytesWritten()
|
||||
span.SetStatus(h.semconv.Status(statusCode))
|
||||
span.SetAttributes(h.semconv.ResponseTraceAttrs(semconv.ResponseTelemetry{
|
||||
StatusCode: statusCode,
|
||||
ReadBytes: bw.BytesRead(),
|
||||
ReadError: bw.Error(),
|
||||
WriteBytes: bytesWritten,
|
||||
WriteError: rww.Error(),
|
||||
})...)
|
||||
|
||||
// Add metrics
|
||||
attributes := append(labeler.Get(), semconvutil.HTTPServerRequestMetrics(h.server, r)...)
|
||||
if rww.statusCode > 0 {
|
||||
attributes = append(attributes, semconv.HTTPStatusCode(rww.statusCode))
|
||||
}
|
||||
o := metric.WithAttributeSet(attribute.NewSet(attributes...))
|
||||
addOpts := []metric.AddOption{o} // Allocate vararg slice once.
|
||||
h.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...)
|
||||
h.responseBytesCounter.Add(ctx, rww.written, addOpts...)
|
||||
|
||||
// Use floating point division here for higher precision (instead of Millisecond method).
|
||||
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
|
||||
|
||||
h.serverLatencyMeasure.Record(ctx, elapsedTime, o)
|
||||
h.semconv.RecordMetrics(ctx, semconv.MetricData{
|
||||
ServerName: h.server,
|
||||
Req: r,
|
||||
StatusCode: statusCode,
|
||||
AdditionalAttributes: labeler.Get(),
|
||||
RequestSize: bw.BytesRead(),
|
||||
ResponseSize: bytesWritten,
|
||||
ElapsedTime: elapsedTime,
|
||||
})
|
||||
}
|
||||
|
||||
// WithRouteTag annotates spans and metrics with the provided route name
|
||||
// with HTTP route attribute.
|
||||
func WithRouteTag(route string, h http.Handler) http.Handler {
|
||||
attr := semconv.NewHTTPServer().Route(route)
|
||||
attr := semconv.NewHTTPServer(nil).Route(route)
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
span := trace.SpanFromContext(r.Context())
|
||||
span.SetAttributes(attr)
|
||||
|
75 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go (generated, vendored, new file)

@@ -0,0 +1,75 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"

import (
"io"
"sync"
)

var _ io.ReadCloser = &BodyWrapper{}

// BodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
// of bytes read and the last error.
type BodyWrapper struct {
io.ReadCloser
OnRead func(n int64) // must not be nil

mu sync.Mutex
read int64
err error
}

// NewBodyWrapper creates a new BodyWrapper.
//
// The onRead attribute is a callback that will be called every time the data
// is read, with the number of bytes being read.
func NewBodyWrapper(body io.ReadCloser, onRead func(int64)) *BodyWrapper {
return &BodyWrapper{
ReadCloser: body,
OnRead: onRead,
}
}

// Read reads the data from the io.ReadCloser, and stores the number of bytes
// read and the error.
func (w *BodyWrapper) Read(b []byte) (int, error) {
n, err := w.ReadCloser.Read(b)
n1 := int64(n)

w.updateReadData(n1, err)
w.OnRead(n1)
return n, err
}

func (w *BodyWrapper) updateReadData(n int64, err error) {
w.mu.Lock()
defer w.mu.Unlock()

w.read += n
if err != nil {
w.err = err
}
}

// Closes closes the io.ReadCloser.
func (w *BodyWrapper) Close() error {
return w.ReadCloser.Close()
}

// BytesRead returns the number of bytes read up to this point.
func (w *BodyWrapper) BytesRead() int64 {
w.mu.Lock()
defer w.mu.Unlock()

return w.read
}

// Error returns the last error.
func (w *BodyWrapper) Error() error {
w.mu.Lock()
defer w.mu.Unlock()

return w.err
}
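body_wrapper.go lives under internal/ and cannot be imported directly; the sketch below mirrors the idea with a local stand-in type (countingBody is not part of otelhttp) to show what the handler and transport now rely on: a per-read callback plus a mutex-guarded byte count.

package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// countingBody is a local stand-in for the internal request.BodyWrapper: it
// wraps an io.ReadCloser, counts bytes read, and invokes a callback per read.
type countingBody struct {
	io.ReadCloser
	onRead func(int64)

	mu   sync.Mutex
	read int64
}

func (b *countingBody) Read(p []byte) (int, error) {
	n, err := b.ReadCloser.Read(p)
	b.mu.Lock()
	b.read += int64(n)
	b.mu.Unlock()
	b.onRead(int64(n))
	return n, err
}

func (b *countingBody) BytesRead() int64 {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.read
}

func main() {
	body := &countingBody{
		ReadCloser: io.NopCloser(strings.NewReader("hello, otel")),
		onRead:     func(n int64) { fmt.Println("read chunk of", n, "bytes") },
	}
	_, _ = io.ReadAll(body)
	fmt.Println("total bytes read:", body.BytesRead())
}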
112 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go (generated, vendored, new file)
@ -0,0 +1,112 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var _ http.ResponseWriter = &RespWriterWrapper{}
|
||||
|
||||
// RespWriterWrapper wraps a http.ResponseWriter in order to track the number of
|
||||
// bytes written, the last error, and to catch the first written statusCode.
|
||||
// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
|
||||
// types (http.Hijacker, http.Pusher, http.CloseNotifier, etc)
|
||||
// that may be useful when using it in real life situations.
|
||||
type RespWriterWrapper struct {
|
||||
http.ResponseWriter
|
||||
OnWrite func(n int64) // must not be nil
|
||||
|
||||
mu sync.RWMutex
|
||||
written int64
|
||||
statusCode int
|
||||
err error
|
||||
wroteHeader bool
|
||||
}
|
||||
|
||||
// NewRespWriterWrapper creates a new RespWriterWrapper.
|
||||
//
|
||||
// The onWrite attribute is a callback that will be called every time the data
|
||||
// is written, with the number of bytes that were written.
|
||||
func NewRespWriterWrapper(w http.ResponseWriter, onWrite func(int64)) *RespWriterWrapper {
|
||||
return &RespWriterWrapper{
|
||||
ResponseWriter: w,
|
||||
OnWrite: onWrite,
|
||||
statusCode: http.StatusOK, // default status code in case the Handler doesn't write anything
|
||||
}
|
||||
}
|
||||
|
||||
// Write writes the bytes array into the [ResponseWriter], and tracks the
|
||||
// number of bytes written and last error.
|
||||
func (w *RespWriterWrapper) Write(p []byte) (int, error) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
w.writeHeader(http.StatusOK)
|
||||
|
||||
n, err := w.ResponseWriter.Write(p)
|
||||
n1 := int64(n)
|
||||
w.OnWrite(n1)
|
||||
w.written += n1
|
||||
w.err = err
|
||||
return n, err
|
||||
}
|
||||
|
||||
// WriteHeader persists initial statusCode for span attribution.
|
||||
// All calls to WriteHeader will be propagated to the underlying ResponseWriter
|
||||
// and will persist the statusCode from the first call.
|
||||
// Blocking consecutive calls to WriteHeader alters expected behavior and will
|
||||
// remove warning logs from net/http where developers will notice incorrect handler implementations.
|
||||
func (w *RespWriterWrapper) WriteHeader(statusCode int) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
|
||||
w.writeHeader(statusCode)
|
||||
}
|
||||
|
||||
// writeHeader persists the status code for span attribution, and propagates
|
||||
// the call to the underlying ResponseWriter.
|
||||
// It does not acquire a lock, and therefore assumes that is being handled by a
|
||||
// parent method.
|
||||
func (w *RespWriterWrapper) writeHeader(statusCode int) {
|
||||
if !w.wroteHeader {
|
||||
w.wroteHeader = true
|
||||
w.statusCode = statusCode
|
||||
}
|
||||
w.ResponseWriter.WriteHeader(statusCode)
|
||||
}
|
||||
|
||||
// Flush implements [http.Flusher].
|
||||
func (w *RespWriterWrapper) Flush() {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
|
||||
if f, ok := w.ResponseWriter.(http.Flusher); ok {
|
||||
f.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
// BytesWritten returns the number of bytes written.
|
||||
func (w *RespWriterWrapper) BytesWritten() int64 {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
|
||||
return w.written
|
||||
}
|
||||
|
||||
// BytesWritten returns the HTTP status code that was sent.
|
||||
func (w *RespWriterWrapper) StatusCode() int {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
|
||||
return w.statusCode
|
||||
}
|
||||
|
||||
// Error returns the last error.
|
||||
func (w *RespWriterWrapper) Error() error {
|
||||
w.mu.RLock()
|
||||
defer w.mu.RUnlock()
|
||||
|
||||
return w.err
|
||||
}
|
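The companion resp_writer_wrapper.go (also internal) captures the first status code written and defaults to 200 when the wrapped handler never calls WriteHeader. A small hedged sketch of that first-write-wins behaviour using a local stand-in type and httptest:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// statusRecorder is a local stand-in for the internal RespWriterWrapper: it
// remembers the first status code written and defaults to 200 OK.
type statusRecorder struct {
	http.ResponseWriter
	status      int
	wroteHeader bool
}

func (w *statusRecorder) WriteHeader(code int) {
	if !w.wroteHeader {
		w.wroteHeader = true
		w.status = code
	}
	w.ResponseWriter.WriteHeader(code)
}

func (w *statusRecorder) Write(p []byte) (int, error) {
	w.WriteHeader(http.StatusOK) // implicit 200 on first write, as net/http does
	return w.ResponseWriter.Write(p)
}

func main() {
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok") // never calls WriteHeader explicitly
	})
	rec := httptest.NewRecorder()
	wrapped := &statusRecorder{ResponseWriter: rec, status: http.StatusOK}
	handler.ServeHTTP(wrapped, httptest.NewRequest(http.MethodGet, "/", nil))
	fmt.Println("recorded status:", wrapped.status) // 200
}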
@ -4,6 +4,7 @@
|
||||
package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
@ -11,6 +12,7 @@ import (
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
)
|
||||
|
||||
type ResponseTelemetry struct {
|
||||
@ -23,6 +25,11 @@ type ResponseTelemetry struct {
|
||||
|
||||
type HTTPServer struct {
|
||||
duplicate bool
|
||||
|
||||
// Old metrics
|
||||
requestBytesCounter metric.Int64Counter
|
||||
responseBytesCounter metric.Int64Counter
|
||||
serverLatencyMeasure metric.Float64Histogram
|
||||
}
|
||||
|
||||
// RequestTraceAttrs returns trace attributes for an HTTP request received by a
|
||||
@ -63,15 +70,10 @@ func (s HTTPServer) Route(route string) attribute.KeyValue {
|
||||
return oldHTTPServer{}.Route(route)
|
||||
}
|
||||
|
||||
func NewHTTPServer() HTTPServer {
|
||||
env := strings.ToLower(os.Getenv("OTEL_HTTP_CLIENT_COMPATIBILITY_MODE"))
|
||||
return HTTPServer{duplicate: env == "http/dup"}
|
||||
}
|
||||
|
||||
// ServerStatus returns a span status code and message for an HTTP status code
|
||||
// Status returns a span status code and message for an HTTP status code
|
||||
// value returned by a server. Status codes in the 400-499 range are not
|
||||
// returned as errors.
|
||||
func ServerStatus(code int) (codes.Code, string) {
|
||||
func (s HTTPServer) Status(code int) (codes.Code, string) {
|
||||
if code < 100 || code >= 600 {
|
||||
return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
|
||||
}
|
||||
@ -80,3 +82,84 @@ func ServerStatus(code int) (codes.Code, string) {
|
||||
}
|
||||
return codes.Unset, ""
|
||||
}
|
||||
|
||||
type MetricData struct {
|
||||
ServerName string
|
||||
Req *http.Request
|
||||
StatusCode int
|
||||
AdditionalAttributes []attribute.KeyValue
|
||||
|
||||
RequestSize int64
|
||||
ResponseSize int64
|
||||
ElapsedTime float64
|
||||
}
|
||||
|
||||
func (s HTTPServer) RecordMetrics(ctx context.Context, md MetricData) {
|
||||
if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil {
|
||||
// This will happen if an HTTPServer{} is used insted of NewHTTPServer.
|
||||
return
|
||||
}
|
||||
|
||||
attributes := oldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes)
|
||||
o := metric.WithAttributeSet(attribute.NewSet(attributes...))
|
||||
addOpts := []metric.AddOption{o} // Allocate vararg slice once.
|
||||
s.requestBytesCounter.Add(ctx, md.RequestSize, addOpts...)
|
||||
s.responseBytesCounter.Add(ctx, md.ResponseSize, addOpts...)
|
||||
s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o)
|
||||
|
||||
// TODO: Duplicate Metrics
|
||||
}
|
||||
|
||||
func NewHTTPServer(meter metric.Meter) HTTPServer {
|
||||
env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
|
||||
duplicate := env == "http/dup"
|
||||
server := HTTPServer{
|
||||
duplicate: duplicate,
|
||||
}
|
||||
server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = oldHTTPServer{}.createMeasures(meter)
|
||||
return server
|
||||
}
|
||||
|
||||
type HTTPClient struct {
|
||||
duplicate bool
|
||||
}
|
||||
|
||||
func NewHTTPClient() HTTPClient {
|
||||
env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN"))
|
||||
return HTTPClient{duplicate: env == "http/dup"}
|
||||
}
|
||||
|
||||
// RequestTraceAttrs returns attributes for an HTTP request made by a client.
|
||||
func (c HTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
|
||||
if c.duplicate {
|
||||
return append(oldHTTPClient{}.RequestTraceAttrs(req), newHTTPClient{}.RequestTraceAttrs(req)...)
|
||||
}
|
||||
return oldHTTPClient{}.RequestTraceAttrs(req)
|
||||
}
|
||||
|
||||
// ResponseTraceAttrs returns metric attributes for an HTTP request made by a client.
|
||||
func (c HTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
|
||||
if c.duplicate {
|
||||
return append(oldHTTPClient{}.ResponseTraceAttrs(resp), newHTTPClient{}.ResponseTraceAttrs(resp)...)
|
||||
}
|
||||
|
||||
return oldHTTPClient{}.ResponseTraceAttrs(resp)
|
||||
}
|
||||
|
||||
func (c HTTPClient) Status(code int) (codes.Code, string) {
|
||||
if code < 100 || code >= 600 {
|
||||
return codes.Error, fmt.Sprintf("Invalid HTTP status code %d", code)
|
||||
}
|
||||
if code >= 400 {
|
||||
return codes.Error, ""
|
||||
}
|
||||
return codes.Unset, ""
|
||||
}
|
||||
|
||||
func (c HTTPClient) ErrorType(err error) attribute.KeyValue {
|
||||
if c.duplicate {
|
||||
return newHTTPClient{}.ErrorType(err)
|
||||
}
|
||||
|
||||
return attribute.KeyValue{}
|
||||
}
|
||||
|
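Taken together with the handler.go changes, metric instruments are now created inside semconv.NewHTTPServer(meter) rather than in the removed middleware.createMeasures, and emitting both old and new HTTP semantic conventions is toggled by OTEL_SEMCONV_STABILITY_OPT_IN=http/dup. Nothing changes on the public API surface; a hedged server-side sketch for completeness (route and port are arbitrary):

package main

import (
	"fmt"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	mux := http.NewServeMux()
	mux.Handle("/hello", otelhttp.WithRouteTag("/hello",
		http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintln(w, "hello")
		})))

	// NewHandler wires up the middleware; request/response size counters and
	// the duration histogram are now created by the internal semconv layer
	// from the configured MeterProvider (global by default).
	handler := otelhttp.NewHandler(mux, "server")

	// Set OTEL_SEMCONV_STABILITY_OPT_IN=http/dup in the environment to emit
	// both the old and new HTTP semantic conventions, per the vendored code.
	_ = http.ListenAndServe(":8080", handler)
}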
@ -4,11 +4,14 @@
|
||||
package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0"
|
||||
semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
|
||||
)
|
||||
|
||||
type newHTTPServer struct{}
|
||||
@ -195,3 +198,151 @@ func (n newHTTPServer) ResponseTraceAttrs(resp ResponseTelemetry) []attribute.Ke
|
||||
func (n newHTTPServer) Route(route string) attribute.KeyValue {
|
||||
return semconvNew.HTTPRoute(route)
|
||||
}
|
||||
|
||||
type newHTTPClient struct{}
|
||||
|
||||
// RequestTraceAttrs returns trace attributes for an HTTP request made by a client.
|
||||
func (n newHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
|
||||
/*
|
||||
below attributes are returned:
|
||||
- http.request.method
|
||||
- http.request.method.original
|
||||
- url.full
|
||||
- server.address
|
||||
- server.port
|
||||
- network.protocol.name
|
||||
- network.protocol.version
|
||||
*/
|
||||
numOfAttributes := 3 // URL, server address, proto, and method.
|
||||
|
||||
var urlHost string
|
||||
if req.URL != nil {
|
||||
urlHost = req.URL.Host
|
||||
}
|
||||
var requestHost string
|
||||
var requestPort int
|
||||
for _, hostport := range []string{urlHost, req.Header.Get("Host")} {
|
||||
requestHost, requestPort = splitHostPort(hostport)
|
||||
if requestHost != "" || requestPort > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
eligiblePort := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort)
|
||||
if eligiblePort > 0 {
|
||||
numOfAttributes++
|
||||
}
|
||||
useragent := req.UserAgent()
|
||||
if useragent != "" {
|
||||
numOfAttributes++
|
||||
}
|
||||
|
||||
protoName, protoVersion := netProtocol(req.Proto)
|
||||
if protoName != "" && protoName != "http" {
|
||||
numOfAttributes++
|
||||
}
|
||||
if protoVersion != "" {
|
||||
numOfAttributes++
|
||||
}
|
||||
|
||||
method, originalMethod := n.method(req.Method)
|
||||
if originalMethod != (attribute.KeyValue{}) {
|
||||
numOfAttributes++
|
||||
}
|
||||
|
||||
attrs := make([]attribute.KeyValue, 0, numOfAttributes)
|
||||
|
||||
attrs = append(attrs, method)
|
||||
if originalMethod != (attribute.KeyValue{}) {
|
||||
attrs = append(attrs, originalMethod)
|
||||
}
|
||||
|
||||
var u string
|
||||
if req.URL != nil {
|
||||
// Remove any username/password info that may be in the URL.
|
||||
userinfo := req.URL.User
|
||||
req.URL.User = nil
|
||||
u = req.URL.String()
|
||||
// Restore any username/password info that was removed.
|
||||
req.URL.User = userinfo
|
||||
}
|
||||
attrs = append(attrs, semconvNew.URLFull(u))
|
||||
|
||||
attrs = append(attrs, semconvNew.ServerAddress(requestHost))
|
||||
if eligiblePort > 0 {
|
||||
attrs = append(attrs, semconvNew.ServerPort(eligiblePort))
|
||||
}
|
||||
|
||||
if protoName != "" && protoName != "http" {
|
||||
attrs = append(attrs, semconvNew.NetworkProtocolName(protoName))
|
||||
}
|
||||
if protoVersion != "" {
|
||||
attrs = append(attrs, semconvNew.NetworkProtocolVersion(protoVersion))
|
||||
}
|
||||
|
||||
return attrs
|
||||
}
|
||||
|
||||
// ResponseTraceAttrs returns trace attributes for an HTTP response made by a client.
|
||||
func (n newHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
|
||||
/*
|
||||
below attributes are returned:
|
||||
- http.response.status_code
|
||||
- error.type
|
||||
*/
|
||||
var count int
|
||||
if resp.StatusCode > 0 {
|
||||
count++
|
||||
}
|
||||
|
||||
if isErrorStatusCode(resp.StatusCode) {
|
||||
count++
|
||||
}
|
||||
|
||||
attrs := make([]attribute.KeyValue, 0, count)
|
||||
if resp.StatusCode > 0 {
|
||||
attrs = append(attrs, semconvNew.HTTPResponseStatusCode(resp.StatusCode))
|
||||
}
|
||||
|
||||
if isErrorStatusCode(resp.StatusCode) {
|
||||
errorType := strconv.Itoa(resp.StatusCode)
|
||||
attrs = append(attrs, semconvNew.ErrorTypeKey.String(errorType))
|
||||
}
|
||||
return attrs
|
||||
}
|
||||
|
||||
func (n newHTTPClient) ErrorType(err error) attribute.KeyValue {
|
||||
t := reflect.TypeOf(err)
|
||||
var value string
|
||||
if t.PkgPath() == "" && t.Name() == "" {
|
||||
// Likely a builtin type.
|
||||
value = t.String()
|
||||
} else {
|
||||
value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
|
||||
}
|
||||
|
||||
if value == "" {
|
||||
return semconvNew.ErrorTypeOther
|
||||
}
|
||||
|
||||
return semconvNew.ErrorTypeKey.String(value)
|
||||
}
|
||||
|
||||
func (n newHTTPClient) method(method string) (attribute.KeyValue, attribute.KeyValue) {
|
||||
if method == "" {
|
||||
return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{}
|
||||
}
|
||||
if attr, ok := methodLookup[method]; ok {
|
||||
return attr, attribute.KeyValue{}
|
||||
}
|
||||
|
||||
orig := semconvNew.HTTPRequestMethodOriginal(method)
|
||||
if attr, ok := methodLookup[strings.ToUpper(method)]; ok {
|
||||
return attr, orig
|
||||
}
|
||||
return semconvNew.HTTPRequestMethodGet, orig
|
||||
}
|
||||
|
||||
func isErrorStatusCode(code int) bool {
|
||||
return code >= 400 || code < 100
|
||||
}
|
@ -9,8 +9,9 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
semconvNew "go.opentelemetry.io/otel/semconv/v1.24.0"
|
||||
semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0"
|
||||
)
|
||||
|
||||
// splitHostPort splits a network address hostport of the form "host",
|
||||
@ -49,7 +50,7 @@ func splitHostPort(hostport string) (host string, port int) {
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return host, int(p)
|
||||
return host, int(p) // nolint: gosec // Byte size checked 16 above.
|
||||
}
|
||||
|
||||
func requiredHTTPPort(https bool, port int) int { // nolint:revive
|
||||
@ -89,3 +90,9 @@ var methodLookup = map[string]attribute.KeyValue{
|
||||
http.MethodPut: semconvNew.HTTPRequestMethodPut,
|
||||
http.MethodTrace: semconvNew.HTTPRequestMethodTrace,
|
||||
}
|
||||
|
||||
func handleErr(err error) {
|
||||
if err != nil {
|
||||
otel.Handle(err)
|
||||
}
|
||||
}
|
||||
|
@ -7,9 +7,13 @@ import (
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/metric/noop"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
|
||||
)
|
||||
|
||||
@ -72,3 +76,117 @@ func (o oldHTTPServer) Route(route string) attribute.KeyValue {
|
||||
func HTTPStatusCode(status int) attribute.KeyValue {
|
||||
return semconv.HTTPStatusCode(status)
|
||||
}
|
||||
|
||||
// Server HTTP metrics.
|
||||
const (
|
||||
serverRequestSize = "http.server.request.size" // Incoming request bytes total
|
||||
serverResponseSize = "http.server.response.size" // Incoming response bytes total
|
||||
serverDuration = "http.server.duration" // Incoming end to end duration, milliseconds
|
||||
)
|
||||
|
||||
func (h oldHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Counter, metric.Int64Counter, metric.Float64Histogram) {
|
||||
if meter == nil {
|
||||
return noop.Int64Counter{}, noop.Int64Counter{}, noop.Float64Histogram{}
|
||||
}
|
||||
var err error
|
||||
requestBytesCounter, err := meter.Int64Counter(
|
||||
serverRequestSize,
|
||||
metric.WithUnit("By"),
|
||||
metric.WithDescription("Measures the size of HTTP request messages."),
|
||||
)
|
||||
handleErr(err)
|
||||
|
||||
responseBytesCounter, err := meter.Int64Counter(
|
||||
serverResponseSize,
|
||||
metric.WithUnit("By"),
|
||||
metric.WithDescription("Measures the size of HTTP response messages."),
|
||||
)
|
||||
handleErr(err)
|
||||
|
||||
serverLatencyMeasure, err := meter.Float64Histogram(
|
||||
serverDuration,
|
||||
metric.WithUnit("ms"),
|
||||
metric.WithDescription("Measures the duration of inbound HTTP requests."),
|
||||
)
|
||||
handleErr(err)
|
||||
|
||||
return requestBytesCounter, responseBytesCounter, serverLatencyMeasure
|
||||
}
|
||||
|
||||
func (o oldHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue {
|
||||
n := len(additionalAttributes) + 3
|
||||
var host string
|
||||
var p int
|
||||
if server == "" {
|
||||
host, p = splitHostPort(req.Host)
|
||||
} else {
|
||||
// Prioritize the primary server name.
|
||||
host, p = splitHostPort(server)
|
||||
if p < 0 {
|
||||
_, p = splitHostPort(req.Host)
|
||||
}
|
||||
}
|
||||
hostPort := requiredHTTPPort(req.TLS != nil, p)
|
||||
if hostPort > 0 {
|
||||
n++
|
||||
}
|
||||
protoName, protoVersion := netProtocol(req.Proto)
|
||||
if protoName != "" {
|
||||
n++
|
||||
}
|
||||
if protoVersion != "" {
|
||||
n++
|
||||
}
|
||||
|
||||
if statusCode > 0 {
|
||||
n++
|
||||
}
|
||||
|
||||
attributes := slices.Grow(additionalAttributes, n)
|
||||
attributes = append(attributes,
|
||||
o.methodMetric(req.Method),
|
||||
o.scheme(req.TLS != nil),
|
||||
semconv.NetHostName(host))
|
||||
|
||||
if hostPort > 0 {
|
||||
attributes = append(attributes, semconv.NetHostPort(hostPort))
|
||||
}
|
||||
if protoName != "" {
|
||||
attributes = append(attributes, semconv.NetProtocolName(protoName))
|
||||
}
|
||||
if protoVersion != "" {
|
||||
attributes = append(attributes, semconv.NetProtocolVersion(protoVersion))
|
||||
}
|
||||
|
||||
if statusCode > 0 {
|
||||
attributes = append(attributes, semconv.HTTPStatusCode(statusCode))
|
||||
}
|
||||
return attributes
|
||||
}
|
||||
|
||||
func (o oldHTTPServer) methodMetric(method string) attribute.KeyValue {
|
||||
method = strings.ToUpper(method)
|
||||
switch method {
|
||||
case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace:
|
||||
default:
|
||||
method = "_OTHER"
|
||||
}
|
||||
return semconv.HTTPMethod(method)
|
||||
}
|
||||
|
||||
func (o oldHTTPServer) scheme(https bool) attribute.KeyValue { // nolint:revive
|
||||
if https {
|
||||
return semconv.HTTPSchemeHTTPS
|
||||
}
|
||||
return semconv.HTTPSchemeHTTP
|
||||
}
|
||||
|
||||
type oldHTTPClient struct{}
|
||||
|
||||
func (o oldHTTPClient) RequestTraceAttrs(req *http.Request) []attribute.KeyValue {
|
||||
return semconvutil.HTTPClientRequest(req)
|
||||
}
|
||||
|
||||
func (o oldHTTPClient) ResponseTraceAttrs(resp *http.Response) []attribute.KeyValue {
|
||||
return semconvutil.HTTPClientResponse(resp)
|
||||
}
|
||||
|
@@ -195,7 +195,7 @@ func splitHostPort(hostport string) (host string, port int) {
if err != nil {
return
}
return host, int(p)
return host, int(p) // nolint: gosec // Bitsize checked to be 16 above.
}

func netProtocol(proto string) (name string, version string) {
64 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go (generated, vendored)
@ -11,13 +11,15 @@ import (
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request"
|
||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv"
|
||||
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil"
|
||||
"go.opentelemetry.io/otel"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
"go.opentelemetry.io/otel/metric"
|
||||
"go.opentelemetry.io/otel/propagation"
|
||||
semconv "go.opentelemetry.io/otel/semconv/v1.20.0"
|
||||
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
@ -26,14 +28,16 @@ import (
|
||||
type Transport struct {
|
||||
rt http.RoundTripper
|
||||
|
||||
tracer trace.Tracer
|
||||
meter metric.Meter
|
||||
propagators propagation.TextMapPropagator
|
||||
spanStartOptions []trace.SpanStartOption
|
||||
filters []Filter
|
||||
spanNameFormatter func(string, *http.Request) string
|
||||
clientTrace func(context.Context) *httptrace.ClientTrace
|
||||
tracer trace.Tracer
|
||||
meter metric.Meter
|
||||
propagators propagation.TextMapPropagator
|
||||
spanStartOptions []trace.SpanStartOption
|
||||
filters []Filter
|
||||
spanNameFormatter func(string, *http.Request) string
|
||||
clientTrace func(context.Context) *httptrace.ClientTrace
|
||||
metricAttributesFn func(*http.Request) []attribute.KeyValue
|
||||
|
||||
semconv semconv.HTTPClient
|
||||
requestBytesCounter metric.Int64Counter
|
||||
responseBytesCounter metric.Int64Counter
|
||||
latencyMeasure metric.Float64Histogram
|
||||
@ -53,7 +57,8 @@ func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
|
||||
}
|
||||
|
||||
t := Transport{
|
||||
rt: base,
|
||||
rt: base,
|
||||
semconv: semconv.NewHTTPClient(),
|
||||
}
|
||||
|
||||
defaultOpts := []Option{
|
||||
@ -76,6 +81,7 @@ func (t *Transport) applyConfig(c *config) {
|
||||
t.filters = c.Filters
|
||||
t.spanNameFormatter = c.SpanNameFormatter
|
||||
t.clientTrace = c.ClientTrace
|
||||
t.metricAttributesFn = c.MetricAttributesFn
|
||||
}
|
||||
|
||||
func (t *Transport) createMeasures() {
|
||||
@ -143,45 +149,49 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
|
||||
r = r.Clone(ctx) // According to RoundTripper spec, we shouldn't modify the origin request.
|
||||
|
||||
// use a body wrapper to determine the request size
|
||||
var bw bodyWrapper
|
||||
// if request body is nil or NoBody, we don't want to mutate the body as it
|
||||
// will affect the identity of it in an unforeseeable way because we assert
|
||||
// ReadCloser fulfills a certain interface and it is indeed nil or NoBody.
|
||||
bw := request.NewBodyWrapper(r.Body, func(int64) {})
|
||||
if r.Body != nil && r.Body != http.NoBody {
|
||||
bw.ReadCloser = r.Body
|
||||
// noop to prevent nil panic. not using this record fun yet.
|
||||
bw.record = func(int64) {}
|
||||
r.Body = &bw
|
||||
r.Body = bw
|
||||
}
|
||||
|
||||
span.SetAttributes(semconvutil.HTTPClientRequest(r)...)
|
||||
span.SetAttributes(t.semconv.RequestTraceAttrs(r)...)
|
||||
t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
|
||||
|
||||
res, err := t.rt.RoundTrip(r)
|
||||
if err != nil {
|
||||
span.RecordError(err)
|
||||
// set error type attribute if the error is part of the predefined
|
||||
// error types.
|
||||
// otherwise, record it as an exception
|
||||
if errType := t.semconv.ErrorType(err); errType.Valid() {
|
||||
span.SetAttributes(errType)
|
||||
} else {
|
||||
span.RecordError(err)
|
||||
}
|
||||
|
||||
span.SetStatus(codes.Error, err.Error())
|
||||
span.End()
|
||||
return res, err
|
||||
}
|
||||
|
||||
// metrics
|
||||
metricAttrs := append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...)
|
||||
metricAttrs := append(append(labeler.Get(), semconvutil.HTTPClientRequestMetrics(r)...), t.metricAttributesFromRequest(r)...)
|
||||
if res.StatusCode > 0 {
|
||||
metricAttrs = append(metricAttrs, semconv.HTTPStatusCode(res.StatusCode))
|
||||
}
|
||||
o := metric.WithAttributeSet(attribute.NewSet(metricAttrs...))
|
||||
addOpts := []metric.AddOption{o} // Allocate vararg slice once.
|
||||
t.requestBytesCounter.Add(ctx, bw.read.Load(), addOpts...)
|
||||
|
||||
t.requestBytesCounter.Add(ctx, bw.BytesRead(), o)
|
||||
// For handling response bytes we leverage a callback when the client reads the http response
|
||||
readRecordFunc := func(n int64) {
|
||||
t.responseBytesCounter.Add(ctx, n, addOpts...)
|
||||
t.responseBytesCounter.Add(ctx, n, o)
|
||||
}
|
||||
|
||||
// traces
|
||||
span.SetAttributes(semconvutil.HTTPClientResponse(res)...)
|
||||
span.SetStatus(semconvutil.HTTPClientStatus(res.StatusCode))
|
||||
span.SetAttributes(t.semconv.ResponseTraceAttrs(res)...)
|
||||
span.SetStatus(t.semconv.Status(res.StatusCode))
|
||||
|
||||
res.Body = newWrappedBody(span, readRecordFunc, res.Body)
|
||||
|
||||
@ -193,6 +203,14 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (t *Transport) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue {
|
||||
var attributeForRequest []attribute.KeyValue
|
||||
if t.metricAttributesFn != nil {
|
||||
attributeForRequest = t.metricAttributesFn(r)
|
||||
}
|
||||
return attributeForRequest
|
||||
}
|
||||
|
||||
// newWrappedBody returns a new and appropriately scoped *wrappedBody as an
|
||||
// io.ReadCloser. If the passed body implements io.Writer, the returned value
|
||||
// will implement io.ReadWriteCloser.
|
||||
|
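On the client side, RoundTrip above now sources trace attributes and span status from the internal semconv.HTTPClient and attaches an error.type attribute for recognised transport errors instead of recording them as exceptions. Installing the transport is unchanged; a hedged sketch (the httptrace hook is optional and purely illustrative):

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptrace"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func main() {
	// The instrumented transport records client spans and metrics; with this
	// release it also sets an error.type span attribute for recognised
	// transport errors instead of recording them as exceptions.
	transport := otelhttp.NewTransport(
		http.DefaultTransport,
		otelhttp.WithClientTrace(func(ctx context.Context) *httptrace.ClientTrace {
			// Purely illustrative trace hooks; any *httptrace.ClientTrace works.
			return &httptrace.ClientTrace{
				GotFirstResponseByte: func() { fmt.Println("first response byte") },
			}
		}),
	)
	client := &http.Client{Transport: transport}

	req, _ := http.NewRequestWithContext(context.Background(), http.MethodGet, "https://example.com", nil)
	resp, err := client.Do(req)
	if err != nil {
		fmt.Println("round trip failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}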
2 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go (generated, vendored)

@@ -5,7 +5,7 @@ package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http

// Version is the current release version of the otelhttp instrumentation.
func Version() string {
return "0.53.0"
return "0.54.0"
// This string is updated by the pre_release.sh script during release
}
99 vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go (generated, vendored)
@ -1,99 +0,0 @@
|
||||
// Copyright The OpenTelemetry Authors
|
||||
// SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
"sync/atomic"
|
||||
|
||||
"go.opentelemetry.io/otel/propagation"
|
||||
)
|
||||
|
||||
var _ io.ReadCloser = &bodyWrapper{}
|
||||
|
||||
// bodyWrapper wraps a http.Request.Body (an io.ReadCloser) to track the number
|
||||
// of bytes read and the last error.
|
||||
type bodyWrapper struct {
|
||||
io.ReadCloser
|
||||
record func(n int64) // must not be nil
|
||||
|
||||
read atomic.Int64
|
||||
err error
|
||||
}
|
||||
|
||||
func (w *bodyWrapper) Read(b []byte) (int, error) {
|
||||
n, err := w.ReadCloser.Read(b)
|
||||
n1 := int64(n)
|
||||
w.read.Add(n1)
|
||||
w.err = err
|
||||
w.record(n1)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (w *bodyWrapper) Close() error {
|
||||
return w.ReadCloser.Close()
|
||||
}
|
||||
|
||||
var _ http.ResponseWriter = &respWriterWrapper{}
|
||||
|
||||
// respWriterWrapper wraps a http.ResponseWriter in order to track the number of
|
||||
// bytes written, the last error, and to catch the first written statusCode.
|
||||
// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
|
||||
// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc)
|
||||
// that may be useful when using it in real life situations.
|
||||
type respWriterWrapper struct {
|
||||
http.ResponseWriter
|
||||
record func(n int64) // must not be nil
|
||||
|
||||
// used to inject the header
|
||||
ctx context.Context
|
||||
|
||||
props propagation.TextMapPropagator
|
||||
|
||||
written int64
|
||||
statusCode int
|
||||
err error
|
||||
wroteHeader bool
|
||||
}
|
||||
|
||||
func (w *respWriterWrapper) Header() http.Header {
|
||||
return w.ResponseWriter.Header()
|
||||
}
|
||||
|
||||
func (w *respWriterWrapper) Write(p []byte) (int, error) {
|
||||
if !w.wroteHeader {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
n, err := w.ResponseWriter.Write(p)
|
||||
n1 := int64(n)
|
||||
w.record(n1)
|
||||
w.written += n1
|
||||
w.err = err
|
||||
return n, err
|
||||
}
|
||||
|
||||
// WriteHeader persists initial statusCode for span attribution.
|
||||
// All calls to WriteHeader will be propagated to the underlying ResponseWriter
|
||||
// and will persist the statusCode from the first call.
|
||||
// Blocking consecutive calls to WriteHeader alters expected behavior and will
|
||||
// remove warning logs from net/http where developers will notice incorrect handler implementations.
|
||||
func (w *respWriterWrapper) WriteHeader(statusCode int) {
|
||||
if !w.wroteHeader {
|
||||
w.wroteHeader = true
|
||||
w.statusCode = statusCode
|
||||
}
|
||||
w.ResponseWriter.WriteHeader(statusCode)
|
||||
}
|
||||
|
||||
func (w *respWriterWrapper) Flush() {
|
||||
if !w.wroteHeader {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
if f, ok := w.ResponseWriter.(http.Flusher); ok {
|
||||
f.Flush()
|
||||
}
|
||||
}
|
13 vendor/go.opentelemetry.io/otel/.golangci.yml (generated, vendored)
@ -9,6 +9,8 @@ linters:
|
||||
disable-all: true
|
||||
# Specifically enable linters we want to use.
|
||||
enable:
|
||||
- asasalint
|
||||
- bodyclose
|
||||
- depguard
|
||||
- errcheck
|
||||
- errorlint
|
||||
@ -23,6 +25,7 @@ linters:
|
||||
- revive
|
||||
- staticcheck
|
||||
- tenv
|
||||
- testifylint
|
||||
- typecheck
|
||||
- unconvert
|
||||
- unused
|
||||
@ -62,12 +65,12 @@ issues:
|
||||
- path: _test\.go
|
||||
linters:
|
||||
- gosec
|
||||
# Igonoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
|
||||
# Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
|
||||
# as we commonly use it in tests and examples.
|
||||
- text: "G404:"
|
||||
linters:
|
||||
- gosec
|
||||
# Igonoring gosec G402: TLS MinVersion too low
|
||||
# Ignoring gosec G402: TLS MinVersion too low
|
||||
# as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
|
||||
- text: "G402: TLS MinVersion too low."
|
||||
linters:
|
||||
@ -300,3 +303,9 @@ linters-settings:
|
||||
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value
|
||||
- name: waitgroup-by-value
|
||||
disabled: false
|
||||
testifylint:
|
||||
enable-all: true
|
||||
disable:
|
||||
- float-compare
|
||||
- go-require
|
||||
- require-error
|
||||
|
121 vendor/go.opentelemetry.io/otel/CHANGELOG.md (generated, vendored)
@ -8,6 +8,112 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
<!-- Released section -->
|
||||
<!-- Don't change this section unless doing release -->
|
||||
|
||||
## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11
|
||||
|
||||
### Added
|
||||
|
||||
- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics sdk. (#5747, #5862)
|
||||
- Add `WithExportBufferSize` option to log batch processor.(#5877)
|
||||
|
||||
### Changed
|
||||
|
||||
- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off` (#5778)
|
||||
- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791)
|
||||
- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791)
|
||||
- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847)
|
||||
- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864)
|
||||
- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858)
|
||||
- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874)
|
||||
|
||||
### Deprecated
|
||||
|
||||
- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854)
|
||||
|
||||
### Fixed
|
||||
|
||||
- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819)
|
||||
- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to it's resource and scope. (#5803)
|
||||
- Fix timer channel drain to avoid hanging on Go 1.23. (#5868)
|
||||
- Fix delegation for global meter providers, and panic when calling otel.SetMeterProvider. (#5827)
|
||||
- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827)
|
||||
|
||||

## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09

### Added

- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739)
- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773)
- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773)
- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755)
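For context on the `WithResource` merging behaviour described above, a small sketch of passing a resource to the metrics SDK; the `service.name` value is an illustrative assumption:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	ctx := context.Background()

	// A schemaless resource keeps the sketch independent of a specific
	// semconv version; real code would typically use resource.New with
	// detectors or the semconv helpers.
	res := resource.NewSchemaless(
		attribute.String("service.name", "demo-service"), // illustrative
	)

	// Per #5773, attributes supplied via the environment (for example
	// OTEL_RESOURCE_ATTRIBUTES) are merged with the resource passed here.
	provider := sdkmetric.NewMeterProvider(sdkmetric.WithResource(res))
	defer func() { _ = provider.Shutdown(ctx) }()
}
```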

### Fixed

- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754)
- Fix panic on instruments creation when setting meter provider. (#5758)
- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780)

### Removed

- Drop support for [Go 1.21]. (#5736, #5740, #5800)

## [1.29.0/0.51.0/0.5.0] 2024-08-23

This release is the last to support [Go 1.21].
The next release will require at least [Go 1.22].

### Added

- Add MacOS ARM64 platform to the compatibility testing suite. (#5577)
- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627)
- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`.
  This new module contains an OTLP exporter that transmits log telemetry using gRPC.
  This module is unstable and breaking changes may be introduced.
  See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629)
- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs. (#5651)
- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651)
- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665)
- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`.
  This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not.
  It replaces the existing `Enabled` method that is removed from the `Processor` interface itself.
  It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692)
- Support [Go 1.23]. (#5720)
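A short sketch of iterating a `TraceState` with the new `Walk` function; the tracestate entries are made up for illustration:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	// Parse a W3C tracestate header value; the vendor keys are illustrative.
	ts, err := trace.ParseTraceState("vendorA=alpha,vendorB=beta")
	if err != nil {
		panic(err)
	}

	// Walk visits every key-value pair; returning false stops the iteration.
	ts.Walk(func(key, value string) bool {
		fmt.Printf("%s=%s\n", key, value)
		return true
	})
}
```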

### Changed

- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132)
- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that the record modifications done in a processor are propagated to subsequent registered processors. (#5636)
- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665)
- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666)
- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666)
- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method.
  See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692)
- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
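The switch of `Processor.OnEmit` to a `*Record` parameter is easiest to see in a custom processor that mutates the record before delegating. A minimal sketch, with an illustrative attribute and a zero-value `SimpleProcessor` standing in for a real export pipeline:

```go
package main

import (
	"context"

	logapi "go.opentelemetry.io/otel/log"
	sdklog "go.opentelemetry.io/otel/sdk/log"
)

// enrichProcessor adds a fixed attribute to every record before handing it
// to the next processor in the pipeline.
type enrichProcessor struct {
	next sdklog.Processor
}

// OnEmit receives *Record, so attributes added here are visible to any
// processor registered after this one (the change tracked by #5636).
func (p enrichProcessor) OnEmit(ctx context.Context, r *sdklog.Record) error {
	r.AddAttributes(logapi.String("deployment.environment", "demo")) // illustrative
	return p.next.OnEmit(ctx, r)
}

func (p enrichProcessor) Shutdown(ctx context.Context) error   { return p.next.Shutdown(ctx) }
func (p enrichProcessor) ForceFlush(ctx context.Context) error { return p.next.ForceFlush(ctx) }

func main() {
	// A zero-value SimpleProcessor is valid (see #5665) and exports nothing,
	// which keeps this sketch self-contained; real code would wrap a
	// processor built around a configured exporter.
	inner := &sdklog.SimpleProcessor{}

	provider := sdklog.NewLoggerProvider(
		sdklog.WithProcessor(enrichProcessor{next: inner}),
	)
	defer func() { _ = provider.Shutdown(context.Background()) }()
}
```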

### Fixed

- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584)
- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541)
- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612)
- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612)
- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612)
- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612)
- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612)
- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612)
- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612)
- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641)
- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650)
- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705)
- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705)

### Removed

- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)

## [1.28.0/0.50.0/0.4.0] 2024-07-02

### Added
@ -49,6 +155,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
- Fix stale timestamps reported by the last-value aggregation. (#5517)
- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521)
- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/attributes` by reducing the number of allocations. (#5549)
- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528)

## [1.27.0/0.49.0/0.3.0] 2024-05-21

@ -175,7 +282,7 @@ The next release will require at least [Go 1.21].
  This module includes OpenTelemetry Go's implementation of the Logs Bridge API.
  This module is in an alpha state, it is subject to breaking changes.
  See our [versioning policy](./VERSIONING.md) for more info. (#4961)
- ARM64 platform to the compatibility testing suite. (#4994)
- Add ARM64 platform to the compatibility testing suite. (#4994)

### Fixed

@ -1836,7 +1943,7 @@ with major version 0.
- Setting error status while recording error with Span from oteltest package. (#1729)
- The concept of a remote and local Span stored in a context is unified to just the current Span.
  Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
  Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContex` can be used to return the current Span.
  Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span.
  If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
  This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
@ -2410,7 +2517,7 @@ This release migrates the default OpenTelemetry SDK into its own Go module, deco
- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
- Update otel-colector example to use the v0.5.0 collector. (#915)
- Update otel-collector example to use the v0.5.0 collector. (#915)
- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
@ -3003,7 +3110,10 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.

[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.28.0...HEAD
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.31.0...HEAD
[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0
[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0
[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0
[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0
[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0
[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0
@ -3086,6 +3196,9 @@ It contains api and sdk for trace and meter.
[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0

<!-- Released section ended -->

[Go 1.23]: https://go.dev/doc/go1.23
[Go 1.22]: https://go.dev/doc/go1.22
[Go 1.21]: https://go.dev/doc/go1.21
[Go 1.20]: https://go.dev/doc/go1.20
6
vendor/go.opentelemetry.io/otel/CODEOWNERS
generated
vendored
@ -5,13 +5,13 @@
#####################################################
#
# Learn about membership in OpenTelemetry community:
# https://github.com/open-telemetry/community/blob/main/community-membership.md
# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md
#
#
# Learn about CODEOWNERS file format:
# https://help.github.com/en/articles/about-code-owners
#

* @MrAlias @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
* @MrAlias @XSAM @dashpole @pellared @dmathieu

CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole @XSAM @dmathieu
CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu
Some files were not shown because too many files have changed in this diff.