Update c/image from the main branch

> go get github.com/containers/image/v5@main
> make vendor

Signed-off-by: Miloslav Trmač <mitr@redhat.com>
Miloslav Trmač 2023-04-01 12:19:48 +02:00
parent 4f475bd4d2
commit bfe82593c8
285 changed files with 21881 additions and 4003 deletions
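
The two commands quoted above are the standard dependency-bump flow for this repository. On a checkout without the "make vendor" target, a rough equivalent (assuming the target wraps the usual Go vendoring steps) is:

    go get github.com/containers/image/v5@main
    go mod tidy
    go mod vendor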

go.mod (64 changed lines)

@@ -4,7 +4,7 @@ go 1.18

 require (
     github.com/containers/common v0.51.2
-    github.com/containers/image/v5 v5.24.2-0.20230215091257-15e211694ae5
+    github.com/containers/image/v5 v5.24.3-0.20230401101358-e3437f272920
     github.com/containers/ocicrypt v1.1.7
     github.com/containers/storage v1.45.4
     github.com/docker/distribution v2.8.1+incompatible
@@ -16,7 +16,7 @@ require (
     github.com/spf13/pflag v1.0.5
     github.com/stretchr/testify v1.8.2
     github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
-    golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb
+    golang.org/x/exp v0.0.0-20230321023759-10a507213a29
     golang.org/x/term v0.6.0
     gopkg.in/yaml.v3 v3.0.1
 )
@@ -27,7 +27,7 @@ require (
     github.com/Microsoft/hcsshim v0.9.7 // indirect
     github.com/VividCortex/ewma v1.2.0 // indirect
     github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
-    github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect
+    github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
     github.com/containerd/cgroups v1.0.4 // indirect
     github.com/containerd/stargz-snapshotter/estargz v0.14.1 // indirect
     github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
@@ -35,44 +35,46 @@ require (
     github.com/cyberphone/json-canonicalization v0.0.0-20220623050100-57a0ce2678a7 // indirect
     github.com/cyphar/filepath-securejoin v0.2.3 // indirect
     github.com/davecgh/go-spew v1.1.1 // indirect
-    github.com/docker/docker v23.0.1+incompatible // indirect
+    github.com/docker/docker v23.0.2+incompatible // indirect
     github.com/docker/docker-credential-helpers v0.7.0 // indirect
     github.com/docker/go-connections v0.4.0 // indirect
     github.com/docker/go-units v0.5.0 // indirect
     github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
     github.com/go-jose/go-jose/v3 v3.0.0 // indirect
+    github.com/go-logr/logr v1.2.3 // indirect
+    github.com/go-logr/stdr v1.2.2 // indirect
     github.com/go-openapi/analysis v0.21.4 // indirect
     github.com/go-openapi/errors v0.20.3 // indirect
     github.com/go-openapi/jsonpointer v0.19.5 // indirect
     github.com/go-openapi/jsonreference v0.20.0 // indirect
     github.com/go-openapi/loads v0.21.2 // indirect
-    github.com/go-openapi/runtime v0.24.1 // indirect
-    github.com/go-openapi/spec v0.20.7 // indirect
-    github.com/go-openapi/strfmt v0.21.3 // indirect
+    github.com/go-openapi/runtime v0.25.0 // indirect
+    github.com/go-openapi/spec v0.20.8 // indirect
+    github.com/go-openapi/strfmt v0.21.7 // indirect
     github.com/go-openapi/swag v0.22.3 // indirect
-    github.com/go-openapi/validate v0.22.0 // indirect
-    github.com/go-playground/locales v0.14.0 // indirect
-    github.com/go-playground/universal-translator v0.18.0 // indirect
-    github.com/go-playground/validator/v10 v10.11.1 // indirect
+    github.com/go-openapi/validate v0.22.1 // indirect
+    github.com/go-playground/locales v0.14.1 // indirect
+    github.com/go-playground/universal-translator v0.18.1 // indirect
+    github.com/go-playground/validator/v10 v10.12.0 // indirect
     github.com/gogo/protobuf v1.3.2 // indirect
     github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
     github.com/golang/protobuf v1.5.2 // indirect
-    github.com/google/go-containerregistry v0.12.1 // indirect
+    github.com/google/go-containerregistry v0.13.0 // indirect
     github.com/google/go-intervals v0.0.2 // indirect
-    github.com/google/trillian v1.5.1-0.20220819043421-0a389c4bb8d9 // indirect
+    github.com/google/trillian v1.5.1 // indirect
     github.com/google/uuid v1.3.0 // indirect
     github.com/gorilla/mux v1.8.0 // indirect
     github.com/hashicorp/errwrap v1.1.0 // indirect
     github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
     github.com/hashicorp/go-multierror v1.1.1 // indirect
     github.com/hashicorp/go-retryablehttp v0.7.2 // indirect
-    github.com/imdario/mergo v0.3.13 // indirect
+    github.com/imdario/mergo v0.3.15 // indirect
     github.com/inconshreveable/mousetrap v1.0.1 // indirect
     github.com/josharian/intern v1.0.0 // indirect
     github.com/json-iterator/go v1.1.12 // indirect
-    github.com/klauspost/compress v1.15.15 // indirect
+    github.com/klauspost/compress v1.16.3 // indirect
     github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 // indirect
-    github.com/leodido/go-urn v1.2.1 // indirect
+    github.com/leodido/go-urn v1.2.2 // indirect
     github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect
     github.com/mailru/easyjson v0.7.7 // indirect
     github.com/mattn/go-runewidth v0.0.14 // indirect
@@ -92,40 +94,42 @@ require (
     github.com/pkg/errors v0.9.1 // indirect
     github.com/pmezard/go-difflib v1.0.0 // indirect
     github.com/proglottis/gpgme v0.1.3 // indirect
-    github.com/rivo/uniseg v0.4.3 // indirect
+    github.com/rivo/uniseg v0.4.4 // indirect
     github.com/russross/blackfriday v2.0.0+incompatible // indirect
     github.com/segmentio/ksuid v1.0.4 // indirect
-    github.com/sigstore/fulcio v1.0.0 // indirect
-    github.com/sigstore/rekor v1.0.1 // indirect
-    github.com/sigstore/sigstore v1.5.1 // indirect
+    github.com/sigstore/fulcio v1.1.0 // indirect
+    github.com/sigstore/rekor v1.1.0 // indirect
+    github.com/sigstore/sigstore v1.6.0 // indirect
     github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
     github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
-    github.com/sylabs/sif/v2 v2.9.1 // indirect
+    github.com/sylabs/sif/v2 v2.11.1 // indirect
     github.com/tchap/go-patricia/v2 v2.3.1 // indirect
     github.com/theupdateframework/go-tuf v0.5.2 // indirect
     github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
     github.com/ulikunitz/xz v0.5.11 // indirect
-    github.com/vbatts/tar-split v0.11.2 // indirect
-    github.com/vbauerster/mpb/v8 v8.1.6 // indirect
+    github.com/vbatts/tar-split v0.11.3 // indirect
+    github.com/vbauerster/mpb/v8 v8.3.0 // indirect
     github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
     github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
     github.com/xeipuuv/gojsonschema v1.2.0 // indirect
     go.etcd.io/bbolt v1.3.7 // indirect
-    go.mongodb.org/mongo-driver v1.11.1 // indirect
+    go.mongodb.org/mongo-driver v1.11.3 // indirect
     go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
     go.opencensus.io v0.24.0 // indirect
-    golang.org/x/crypto v0.6.0 // indirect
-    golang.org/x/mod v0.8.0 // indirect
+    go.opentelemetry.io/otel v1.13.0 // indirect
+    go.opentelemetry.io/otel/trace v1.13.0 // indirect
+    golang.org/x/crypto v0.7.0 // indirect
+    golang.org/x/mod v0.9.0 // indirect
     golang.org/x/net v0.8.0 // indirect
-    golang.org/x/oauth2 v0.5.0 // indirect
+    golang.org/x/oauth2 v0.6.0 // indirect
     golang.org/x/sync v0.1.0 // indirect
     golang.org/x/sys v0.6.0 // indirect
     golang.org/x/text v0.8.0 // indirect
     golang.org/x/tools v0.6.0 // indirect
     google.golang.org/appengine v1.6.7 // indirect
-    google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
-    google.golang.org/grpc v1.51.0 // indirect
-    google.golang.org/protobuf v1.28.1 // indirect
+    google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
+    google.golang.org/grpc v1.54.0 // indirect
+    google.golang.org/protobuf v1.30.0 // indirect
     gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
     gopkg.in/square/go-jose.v2 v2.6.0 // indirect
     gopkg.in/yaml.v2 v2.4.0 // indirect
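
After vendoring, the bumped version can be confirmed with standard Go tooling:

    $ go list -m github.com/containers/image/v5
    github.com/containers/image/v5 v5.24.3-0.20230401101358-e3437f272920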

go.sum (164 changed lines)

@@ -84,8 +84,8 @@ github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kd
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ=
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -108,7 +108,7 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
@@ -214,8 +214,8 @@ github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHV
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
github.com/containers/common v0.51.2 h1:tJ6Nt+zAC6t8nm8qvlVKNjpp/uh3ane80gyj63BwP0Y=
github.com/containers/common v0.51.2/go.mod h1:3W2WIdalgQfrsX/T5tjX+6CxgT3ThJVN2G9sNuFjuCM=
github.com/containers/image/v5 v5.24.2-0.20230215091257-15e211694ae5 h1:dK2J9LpNl52JobfNRkPRAJhc94MJVXaND6s+14qxV6E=
github.com/containers/image/v5 v5.24.2-0.20230215091257-15e211694ae5/go.mod h1:TvwOWxKwwjsvu/T/5hdAOOCYxl2tXTOA+6qnhZmVAnc=
github.com/containers/image/v5 v5.24.3-0.20230401101358-e3437f272920 h1:hycywXvCiW9mISvh9jr2Bv/yei7yz4Epu40EeCWkQR8=
github.com/containers/image/v5 v5.24.3-0.20230401101358-e3437f272920/go.mod h1:nBodKP9+9IjCTME53bROtIOYDkj9GogrA3Nz2icRWGI=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
@@ -271,8 +271,8 @@ github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4Kfc
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v23.0.1+incompatible h1:vjgvJZxprTTE1A37nm+CLNAdwu6xZekyoiVlUZEINcY=
github.com/docker/docker v23.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v23.0.2+incompatible h1:q81C2qQ/EhPm8COZMUGOQYh4qLv4Xu6CXELJ3WK/mlU=
github.com/docker/docker v23.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
@@ -328,7 +328,11 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY=
github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc=
github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo=
@@ -351,19 +355,19 @@ github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXym
github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro=
github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
github.com/go-openapi/runtime v0.24.1 h1:Sml5cgQKGYQHF+M7yYSHaH1eOjvTykrddTE/KtQVjqo=
github.com/go-openapi/runtime v0.24.1/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk=
github.com/go-openapi/runtime v0.25.0 h1:7yQTCdRbWhX8vnIjdzU8S00tBYf7Sg71EBeorlPHvhc=
github.com/go-openapi/runtime v0.25.0/go.mod h1:Ux6fikcHXyyob6LNWxtE96hWwjBPYF0DXgVFuMTneOs=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
github.com/go-openapi/spec v0.20.7 h1:1Rlu/ZrOCCob0n+JKKJAWhNWMPW8bOZRg8FJaY+0SKI=
github.com/go-openapi/spec v0.20.7/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU=
github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
github.com/go-openapi/strfmt v0.21.3 h1:xwhj5X6CjXEZZHMWy1zKJxvW9AfHC9pkyUjLvHtKG7o=
github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k=
github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
@@ -371,20 +375,17 @@ github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
github.com/go-openapi/validate v0.22.0 h1:b0QecH6VslW/TxtpKgzpO1SNG7GU2FsaqKdP1E2T50Y=
github.com/go-openapi/validate v0.22.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU=
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho=
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ=
github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
github.com/go-rod/rod v0.112.3 h1:xbSaA9trZ8v/+eJRGOM6exK1RCsLPwwnzA78vpES0gk=
github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.12.0 h1:E4gtWgxWxp8YSxExrQFv5BpCahla0PVF2oTTEYaWQGI=
github.com/go-playground/validator/v10 v10.12.0/go.mod h1:hCAPuzYvKdP33pxWa+2+6AIKXEKqjIUyqsNCtbsSJrA=
github.com/go-rod/rod v0.112.6 h1:zMirUmhsBeshMWyf285BD0UGtGq54HfThLDGSjcP3lU=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
@@ -474,8 +475,8 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
github.com/google/go-containerregistry v0.12.1 h1:W1mzdNUTx4Zla4JaixCRLhORcR7G6KxE5hHl5fkPsp8=
github.com/google/go-containerregistry v0.12.1/go.mod h1:sdIK+oHQO7B93xI8UweYdl887YhuIwg9vz8BSLH3+8k=
github.com/google/go-containerregistry v0.13.0 h1:y1C7Z3e149OJbOPDBxLYR8ITPz8dTKqQwjErKVHJC8k=
github.com/google/go-containerregistry v0.13.0/go.mod h1:J9FQ+eSS4a1aC2GNZxvNpbWhgp0487v+cgiilB4FqDo=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -488,8 +489,8 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/trillian v1.5.1-0.20220819043421-0a389c4bb8d9 h1:GFmzYtwUMi1S2mjLxfrJ/CZ9gWDG+zeLtZByg/QEBkk=
github.com/google/trillian v1.5.1-0.20220819043421-0a389c4bb8d9/go.mod h1:vywkS3p2SgNmPL7oAWqU5PiiknzRMp+ol3a19jfY2PQ=
github.com/google/trillian v1.5.1 h1:2p1l13f0eWd7eOShwarwIxutYYnGzY/5S+xYewQIPkU=
github.com/google/trillian v1.5.1/go.mod h1:EcDttN8nf+EoAiyLigBAp9ebncZI6rhJPyxZ+dQ6HSo=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -541,8 +542,8 @@ github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc=
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
@@ -574,8 +575,8 @@ github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY=
github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8 h1:BcxbplxjtczA1a6d3wYoa7a0WL3rq9DKBMGHeKyjEF0=
github.com/klauspost/pgzip v1.2.6-0.20220930104621-17e8dac29df8/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -585,16 +586,15 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.2 h1:7z68G0FCGvDk646jz1AelTYNYWrTNm0bEcFAo147wt4=
github.com/leodido/go-urn v1.2.2/go.mod h1:kUaIbLZWttglzwNuG0pgsh5vuV6u2YcGBYz1hIPjtOQ=
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 h1:unJdfS94Y3k85TKy+mvKzjW5R9rIC+Lv4KGbE7uNu0I=
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6/go.mod h1:PUgW5vI9ANEaV6qv9a6EKu8gAySgwf0xrzG9xIB/CK0=
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
@@ -634,7 +634,6 @@ github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
@@ -731,7 +730,6 @@ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/9
github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -761,7 +759,7 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -773,23 +771,21 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=
github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/russross/blackfriday v2.0.0+incompatible h1:cBXrhZNUf9C+La9/YpS+UHpUT8YD6Td9ZMSU9APFcsk=
github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/rwtodd/Go.Sed v0.0.0-20210816025313-55464686f9ef/go.mod h1:8AEUvGVi2uQ5b24BIhcr0GCcpd/RNAFWaN2CJFrWIIQ=
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
@@ -801,12 +797,12 @@ github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sigstore/fulcio v1.0.0 h1:hBZW6qg9GXTtCX8jOg1hmyjYLrmsEKZGeMwAbW3XNEg=
github.com/sigstore/fulcio v1.0.0/go.mod h1:j4MzLxX/Be0rHYh3JF2dgMorkWGzEMHBqIHwFU8I/Rw=
github.com/sigstore/rekor v1.0.1 h1:rcESXSNkAPRWFYZel9rarspdvneET60F2ngNkadi89c=
github.com/sigstore/rekor v1.0.1/go.mod h1:ecTKdZWGWqE1pl3U1m1JebQJLU/hSjD9vYHOmHQ7w4g=
github.com/sigstore/sigstore v1.5.1 h1:iUou0QJW8eQKMUkTXbFyof9ZOblDtfaW2Sn2+QI8Tcs=
github.com/sigstore/sigstore v1.5.1/go.mod h1:3i6UTWVNtFwOtbgG63FZZNID4vO9KcO8AszIJlaNI8k=
github.com/sigstore/fulcio v1.1.0 h1:mzzJ05Ccu8Y2inyioklNvc8MpzlGHxu8YqNeTm0dHfU=
github.com/sigstore/fulcio v1.1.0/go.mod h1:zv1ZQTXZbUwQdRwajlQksc34pRas+2aZYpIZoQBNev8=
github.com/sigstore/rekor v1.1.0 h1:9fjPvW0WERE7VPtSSVSTbDLLOsrNx3RtiIeZ4/1tmDI=
github.com/sigstore/rekor v1.1.0/go.mod h1:jEOGDGPMURBt9WR50N0rO7X8GZzLE3UQT+ln6BKJ/m0=
github.com/sigstore/sigstore v1.6.0 h1:0fYHVoUlPU3WM8o3U1jT9SI2lqQE68XbG+qWncXaZC8=
github.com/sigstore/sigstore v1.6.0/go.mod h1:+55pf6HZ15kf60c08W+GH95JQbAcnVyUBquQGSVdsto=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -860,8 +856,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/sylabs/sif/v2 v2.9.1 h1:LxF9EcH4hmwSqDBdRv9Tt57YVkvV9rDu66AA/nmns2Y=
github.com/sylabs/sif/v2 v2.9.1/go.mod h1:10lbqUw/uptKH4Z6dRDZl+9Iz7jMiFMDE99eHRJDwOs=
github.com/sylabs/sif/v2 v2.11.1 h1:d09yPukVa8b74wuy+QTA4Is3w8MH0UjO/xlWQUuFzpY=
github.com/sylabs/sif/v2 v2.11.1/go.mod h1:i4GcKLOaT4ertznbsuf11d/G9zLEfUZa7YhrFc5L6YQ=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
@@ -885,11 +881,11 @@ github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME=
github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
github.com/vbauerster/mpb/v8 v8.1.6 h1:EswHDkAsy4OQ7QBAmU1MUPz4vHzl6KlINjlh7vJoxvY=
github.com/vbauerster/mpb/v8 v8.1.6/go.mod h1:O9/Wl8X9dUbR63tZ41MLIAxrtNfwlpwUhGkeYugUPW8=
github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck=
github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
github.com/vbauerster/mpb/v8 v8.3.0 h1:xw2eMJ6v5NP8Rd7yOVzU6OqnRPrS1yWAoLTrWe7W4Nc=
github.com/vbauerster/mpb/v8 v8.3.0/go.mod h1:bngtYUAu25QGxcYYglsF6oyoHlC9Yhh582xF9LjfmL4=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
@@ -935,10 +931,9 @@ go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY=
go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
go.mongodb.org/mongo-driver v1.11.1 h1:QP0znIRTuL0jf1oBQoAoM0C6ZJfBK4kx0Uumtv1A7w8=
go.mongodb.org/mongo-driver v1.11.1/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8=
go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y=
go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak=
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
@@ -948,9 +943,15 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/otel v1.13.0 h1:1ZAKnNQKwBBxFtww/GwxNUyTf0AxkZzrukO8MeXqe4Y=
go.opentelemetry.io/otel v1.13.0/go.mod h1:FH3RtdZCzRkJYFTCsAKDy9l/XYjMdNv6QrkFFB8DvVg=
go.opentelemetry.io/otel/sdk v1.13.0 h1:BHib5g8MvdqS65yo2vV1s6Le42Hm6rrw08qU6yz5JaM=
go.opentelemetry.io/otel/trace v1.13.0 h1:CBgRZ6ntv+Amuj1jDsMhZtlAPT6gbyIRdaIzFhfBSdY=
go.opentelemetry.io/otel/trace v1.13.0/go.mod h1:muCvmmO9KKpvuXSf3KKAXXB2ygNYHQ+ZfI5X08d3tds=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -968,13 +969,11 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.7.0 h1:AvwMYaRytfdeVt3u6mLaxYtErKYjxA2OXjJ1HHq6t3A=
golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -985,8 +984,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb h1:PaBZQdo+iSDyHT053FjUCgZQ/9uqVwPOcl7KSWhKn6w=
golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=
golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1009,8 +1008,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs=
golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1052,7 +1051,6 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
@@ -1064,8 +1062,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk=
golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s=
golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I=
golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw=
golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1152,7 +1150,6 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1160,10 +1157,10 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
@@ -1289,8 +1286,8 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY=
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA=
google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1309,8 +1306,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag=
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1325,8 +1322,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc=
@@ -1336,7 +1333,6 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -1406,7 +1402,7 @@ k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAE
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
k8s.io/klog/v2 v2.90.0 h1:VkTxIV/FjRXn1fgNNcKGM8cfmL1Z33ZjXRTVxKCoF5M=
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=

vendor/github.com/asaskevich/govalidator/validator.go

@@ -454,27 +454,26 @@ func IsCreditCard(str string) bool {
     if !rxCreditCard.MatchString(sanitized) {
         return false
     }
-    var sum int64
-    var digit string
-    var tmpNum int64
-    var shouldDouble bool
-    for i := len(sanitized) - 1; i >= 0; i-- {
-        digit = sanitized[i:(i + 1)]
-        tmpNum, _ = ToInt(digit)
-        if shouldDouble {
-            tmpNum *= 2
-            if tmpNum >= 10 {
-                sum += (tmpNum % 10) + 1
-            } else {
-                sum += tmpNum
-            }
-        } else {
-            sum += tmpNum
-        }
-        shouldDouble = !shouldDouble
-    }
-    return sum%10 == 0
+    number, _ := ToInt(sanitized)
+    number, lastDigit := number / 10, number % 10
+
+    var sum int64
+    for i := 0; number > 0; i++ {
+        digit := number % 10
+        if i % 2 == 0 {
+            digit *= 2
+            if digit > 9 {
+                digit -= 9
+            }
+        }
+        sum += digit
+        number = number / 10
+    }
+
+    return (sum + lastDigit) % 10 == 0
 }

 // IsISBN10 checks if the string is an ISBN version 10.
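
The rewrite computes the same Luhn checksum arithmetically rather than by slicing the string one character at a time. A standalone sketch of the new logic (the luhnValid helper and the test numbers are illustrative, not part of the commit):

    package main

    import "fmt"

    // luhnValid reports whether an already-sanitized numeric value passes the
    // Luhn checksum, mirroring the arithmetic of the new IsCreditCard body.
    func luhnValid(number int64) bool {
        number, lastDigit := number/10, number%10
        var sum int64
        for i := 0; number > 0; i++ {
            digit := number % 10
            if i%2 == 0 { // double every second digit, starting next to the check digit
                digit *= 2
                if digit > 9 {
                    digit -= 9
                }
            }
            sum += digit
            number /= 10
        }
        return (sum+lastDigit)%10 == 0
    }

    func main() {
        fmt.Println(luhnValid(4111111111111111)) // true: a well-known Luhn-valid test number
        fmt.Println(luhnValid(4111111111111112)) // false: last digit perturbed
    }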

vendor/github.com/containers/image/v5/copy/blob.go

@@ -104,12 +104,11 @@ func (ic *imageCopier) copyBlobFromStream(ctx context.Context, srcReader io.Read
     if !isConfig {
         options.LayerIndex = &layerIndex
     }
-    uploadedInfo, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options)
+    destBlob, err := ic.c.dest.PutBlobWithOptions(ctx, &errorAnnotationReader{stream.reader}, stream.info, options)
     if err != nil {
         return types.BlobInfo{}, fmt.Errorf("writing blob: %w", err)
     }
-    uploadedInfo.Annotations = stream.info.Annotations
+    uploadedInfo := updatedBlobInfoFromUpload(stream.info, destBlob)

     compressionStep.updateCompressionEdits(&uploadedInfo.CompressionOperation, &uploadedInfo.CompressionAlgorithm, &uploadedInfo.Annotations)
     decryptionStep.updateCryptoOperation(&uploadedInfo.CryptoOperation)
@@ -169,3 +168,20 @@ func (r errorAnnotationReader) Read(b []byte) (n int, err error) {
     }
     return n, err
 }
+
+// updatedBlobInfoFromUpload returns inputInfo updated with uploadedBlob which was created based on inputInfo.
+func updatedBlobInfoFromUpload(inputInfo types.BlobInfo, uploadedBlob private.UploadedBlob) types.BlobInfo {
+    // The transport is only tasked with dealing with the raw blob, and possibly computing Digest/Size.
+    // Handling of compression, encryption, and the related MIME types and the like are all the responsibility
+    // of the generic code in this package.
+    return types.BlobInfo{
+        Digest:               uploadedBlob.Digest,
+        Size:                 uploadedBlob.Size,
+        URLs:                 nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
+        Annotations:          inputInfo.Annotations,
+        MediaType:            inputInfo.MediaType,            // Mostly irrelevant, MediaType is updated based on Compression/Crypto.
+        CompressionOperation: inputInfo.CompressionOperation, // Expected to be unset, and only updated by copyBlobFromStream.
+        CompressionAlgorithm: inputInfo.CompressionAlgorithm, // Expected to be unset, and only updated by copyBlobFromStream.
+        CryptoOperation:      inputInfo.CryptoOperation,      // Expected to be unset, and only updated by copyBlobFromStream.
+    }
+}
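
For illustration, from inside the copy package the merge keeps caller-side metadata and takes only the transport-computed identity (all values hypothetical):

    in := types.BlobInfo{MediaType: "application/octet-stream", Annotations: map[string]string{"org.example/key": "v"}}
    up := private.UploadedBlob{Digest: "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", Size: 0}
    out := updatedBlobInfoFromUpload(in, up)
    // out.Digest and out.Size come from up; out.Annotations and out.MediaType
    // carry over from in; out.URLs is always nil.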

vendor/github.com/containers/image/v5/copy/compression.go

@@ -6,13 +6,32 @@ import (
     "io"

     internalblobinfocache "github.com/containers/image/v5/internal/blobinfocache"
+    "github.com/containers/image/v5/manifest"
     "github.com/containers/image/v5/pkg/compression"
     compressiontypes "github.com/containers/image/v5/pkg/compression/types"
     "github.com/containers/image/v5/types"
+    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
     "github.com/sirupsen/logrus"
     "golang.org/x/exp/maps"
 )

+var (
+    // defaultCompressionFormat is used if the destination transport requests
+    // compression, and the user does not explicitly instruct us to use an algorithm.
+    defaultCompressionFormat = &compression.Gzip
+
+    // compressionBufferSize is the buffer size used to compress a blob
+    compressionBufferSize = 1048576
+
+    // expectedCompressionFormats is used to check if a blob with a specified media type is compressed
+    // using the algorithm that the media type says it should be compressed with
+    expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
+        imgspecv1.MediaTypeImageLayerGzip:      &compression.Gzip,
+        imgspecv1.MediaTypeImageLayerZstd:      &compression.Zstd,
+        manifest.DockerV2Schema2LayerMediaType: &compression.Gzip,
+    }
+)
+
 // bpDetectCompressionStepData contains data that the copy pipeline needs about the “detect compression” step.
 type bpDetectCompressionStepData struct {
     isCompressed bool
@@ -110,13 +129,13 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp
     if ic.c.dest.DesiredLayerCompression() == types.Compress && !detected.isCompressed {
         logrus.Debugf("Compressing blob on the fly")
         var uploadedAlgorithm *compressiontypes.Algorithm
-        if ic.c.compressionFormat != nil {
-            uploadedAlgorithm = ic.c.compressionFormat
+        if ic.compressionFormat != nil {
+            uploadedAlgorithm = ic.compressionFormat
         } else {
             uploadedAlgorithm = defaultCompressionFormat
         }

-        reader, annotations := ic.c.compressedStream(stream.reader, *uploadedAlgorithm)
+        reader, annotations := ic.compressedStream(stream.reader, *uploadedAlgorithm)
         // Note: reader must be closed on all return paths.
         stream.reader = reader
         stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
@@ -138,7 +157,7 @@
 // bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so.
 func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
     if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed &&
-        ic.c.compressionFormat != nil && ic.c.compressionFormat.Name() != detected.format.Name() {
+        ic.compressionFormat != nil && ic.compressionFormat.Name() != detected.format.Name() {
         // When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
         // re-compressed using the desired format.
         logrus.Debugf("Blob will be converted")
@@ -154,7 +173,7 @@
             }
         }()

-        recompressed, annotations := ic.c.compressedStream(decompressed, *ic.c.compressionFormat)
+        recompressed, annotations := ic.compressedStream(decompressed, *ic.compressionFormat)
         // Note: recompressed must be closed on all return paths.
         stream.reader = recompressed
         stream.info = types.BlobInfo{ // FIXME? Should we preserve more data in src.info?
@@ -164,10 +183,10 @@
         succeeded = true
         return &bpCompressionStepData{
             operation:              types.PreserveOriginal,
-            uploadedAlgorithm:      ic.c.compressionFormat,
+            uploadedAlgorithm:      ic.compressionFormat,
             uploadedAnnotations:    annotations,
             srcCompressorName:      detected.srcCompressorName,
-            uploadedCompressorName: ic.c.compressionFormat.Name(),
+            uploadedCompressorName: ic.compressionFormat.Name(),
             closers:                []io.Closer{decompressed, recompressed},
         }, nil
     }
@ -299,24 +318,24 @@ func doCompression(dest io.Writer, src io.Reader, metadata map[string]string, co
}
// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
func (c *copier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
func (ic *imageCopier) compressGoroutine(dest *io.PipeWriter, src io.Reader, metadata map[string]string, compressionFormat compressiontypes.Algorithm) {
err := errors.New("Internal error: unexpected panic in compressGoroutine")
defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
_ = dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
}()
err = doCompression(dest, src, metadata, compressionFormat, c.compressionLevel)
err = doCompression(dest, src, metadata, compressionFormat, ic.compressionLevel)
}
// compressedStream returns a stream the input reader compressed using format, and a metadata map.
// The caller must close the returned reader.
// AFTER the stream is consumed, metadata will be updated with annotations to use on the data.
func (c *copier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) {
func (ic *imageCopier) compressedStream(reader io.Reader, algorithm compressiontypes.Algorithm) (io.ReadCloser, map[string]string) {
pipeReader, pipeWriter := io.Pipe()
annotations := map[string]string{}
// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
// we don't care.
go c.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter
go ic.compressGoroutine(pipeWriter, reader, annotations, algorithm) // Closes pipeWriter
return pipeReader, annotations
}
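
The pipe-and-goroutine pattern behind compressGoroutine/compressedStream is worth seeing in isolation. Below is a minimal, self-contained sketch of the same idea built only on the standard library's compress/gzip; gzipStream is an illustrative name, not part of this package:

package main

import (
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"strings"
)

// gzipStream returns a reader yielding the gzip-compressed form of src.
// The caller must close the returned reader.
func gzipStream(src io.Reader) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		// err is read lazily by the deferred closure, exactly like compressGoroutine above.
		err := errors.New("internal error: unexpected panic in gzipStream")
		defer func() { _ = pw.CloseWithError(err) }() // CloseWithError(nil) is equivalent to Close()
		zw := gzip.NewWriter(pw)
		if _, err = io.Copy(zw, src); err != nil {
			return
		}
		err = zw.Close() // flush; a nil result marks success
	}()
	return pr
}

func main() {
	r := gzipStream(strings.NewReader("hello, layer data"))
	defer r.Close()
	n, err := io.Copy(io.Discard, r)
	fmt.Println(n, err) // compressed byte count, <nil>
}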

File diff suppressed because it is too large

vendor/github.com/containers/image/v5/copy/multiple.go generated vendored Normal file

@ -0,0 +1,198 @@
package copy
import (
"bytes"
"context"
"errors"
"fmt"
"strings"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/image"
internalManifest "github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/signature"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
// copyMultipleImages copies some or all of an image list's instances, using
// policyContext to validate source image admissibility.
func (c *copier) copyMultipleImages(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel *image.UnparsedImage) (copiedManifest []byte, retErr error) {
// Parse the list and get a copy of the original value after it's re-encoded.
manifestList, manifestType, err := unparsedToplevel.Manifest(ctx)
if err != nil {
return nil, fmt.Errorf("reading manifest list: %w", err)
}
originalList, err := internalManifest.ListFromBlob(manifestList, manifestType)
if err != nil {
return nil, fmt.Errorf("parsing manifest list %q: %w", string(manifestList), err)
}
updatedList := originalList.CloneInternal()
sigs, err := c.sourceSignatures(ctx, unparsedToplevel, options,
"Getting image list signatures",
"Checking if image list destination supports signatures")
if err != nil {
return nil, err
}
// If the destination is a digested reference, make a note of that, determine what digest value we're
// expecting, and check that the source manifest matches it.
destIsDigestedReference := false
if named := c.dest.Reference().DockerReference(); named != nil {
if digested, ok := named.(reference.Digested); ok {
destIsDigestedReference = true
matches, err := manifest.MatchesDigest(manifestList, digested.Digest())
if err != nil {
return nil, fmt.Errorf("computing digest of source image's manifest: %w", err)
}
if !matches {
return nil, errors.New("Digest of source image's manifest would not match destination reference")
}
}
}
// Determine if we're allowed to modify the manifest list.
// If we can, set to the empty string. If we can't, set to the reason why.
// Compare, and perhaps keep in sync with, the version in copySingleImage.
cannotModifyManifestListReason := ""
if len(sigs) > 0 {
cannotModifyManifestListReason = "Would invalidate signatures"
}
if destIsDigestedReference {
cannotModifyManifestListReason = "Destination specifies a digest"
}
if options.PreserveDigests {
cannotModifyManifestListReason = "Instructed to preserve digests"
}
// Determine if we'll need to convert the manifest list to a different format.
forceListMIMEType := options.ForceManifestMIMEType
switch forceListMIMEType {
case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType:
forceListMIMEType = manifest.DockerV2ListMediaType
case imgspecv1.MediaTypeImageManifest:
forceListMIMEType = imgspecv1.MediaTypeImageIndex
}
selectedListType, otherManifestMIMETypeCandidates, err := c.determineListConversion(manifestType, c.dest.SupportedManifestMIMETypes(), forceListMIMEType)
if err != nil {
return nil, fmt.Errorf("determining manifest list type to write to destination: %w", err)
}
if selectedListType != originalList.MIMEType() {
if cannotModifyManifestListReason != "" {
return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", selectedListType, cannotModifyManifestListReason)
}
}
// Copy each image, or just the ones we want to copy, in turn.
instanceDigests := updatedList.Instances()
imagesToCopy := len(instanceDigests)
if options.ImageListSelection == CopySpecificImages {
imagesToCopy = len(options.Instances)
}
c.Printf("Copying %d of %d images in list\n", imagesToCopy, len(instanceDigests))
updates := make([]manifest.ListUpdate, len(instanceDigests))
instancesCopied := 0
for i, instanceDigest := range instanceDigests {
if options.ImageListSelection == CopySpecificImages &&
!slices.Contains(options.Instances, instanceDigest) {
update, err := updatedList.Instance(instanceDigest)
if err != nil {
return nil, err
}
logrus.Debugf("Skipping instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
// Record the digest/size/type of the manifest that we didn't copy.
updates[i] = update
continue
}
logrus.Debugf("Copying instance %s (%d/%d)", instanceDigest, i+1, len(instanceDigests))
c.Printf("Copying image %s (%d/%d)\n", instanceDigest, instancesCopied+1, imagesToCopy)
unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceDigest)
updatedManifest, updatedManifestType, updatedManifestDigest, err := c.copySingleImage(ctx, policyContext, options, unparsedToplevel, unparsedInstance, &instanceDigest)
if err != nil {
return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", instancesCopied+1, imagesToCopy, err)
}
instancesCopied++
// Record the result of a possible conversion here.
update := manifest.ListUpdate{
Digest: updatedManifestDigest,
Size: int64(len(updatedManifest)),
MediaType: updatedManifestType,
}
updates[i] = update
}
// Now reset the digest/size/types of the manifests in the list to account for any conversions that we made.
if err = updatedList.UpdateInstances(updates); err != nil {
return nil, fmt.Errorf("updating manifest list: %w", err)
}
// Iterate through supported list types, preferred format first.
c.Printf("Writing manifest list to image destination\n")
var errs []string
for _, thisListType := range append([]string{selectedListType}, otherManifestMIMETypeCandidates...) {
var attemptedList internalManifest.ListPublic = updatedList
logrus.Debugf("Trying to use manifest list type %s…", thisListType)
// Perform the list conversion, if we need one.
if thisListType != updatedList.MIMEType() {
attemptedList, err = updatedList.ConvertToMIMEType(thisListType)
if err != nil {
return nil, fmt.Errorf("converting manifest list to list with MIME type %q: %w", thisListType, err)
}
}
// Check if the updates or a type conversion meaningfully changed the list of images
// by serializing them both so that we can compare them.
attemptedManifestList, err := attemptedList.Serialize()
if err != nil {
return nil, fmt.Errorf("encoding updated manifest list (%q: %#v): %w", updatedList.MIMEType(), updatedList.Instances(), err)
}
originalManifestList, err := originalList.Serialize()
if err != nil {
return nil, fmt.Errorf("encoding original manifest list for comparison (%q: %#v): %w", originalList.MIMEType(), originalList.Instances(), err)
}
// If we can't just use the original value, but we have to change it, flag an error.
if !bytes.Equal(attemptedManifestList, originalManifestList) {
if cannotModifyManifestListReason != "" {
return nil, fmt.Errorf("Manifest list must be converted to type %q to be written to destination, but we cannot modify it: %q", thisListType, cannotModifyManifestListReason)
}
logrus.Debugf("Manifest list has been updated")
} else {
// We can just use the original value, so use it instead of the one we just rebuilt, so that we don't change the digest.
attemptedManifestList = manifestList
}
// Save the manifest list.
err = c.dest.PutManifest(ctx, attemptedManifestList, nil)
if err != nil {
logrus.Debugf("Upload of manifest list type %s failed: %v", thisListType, err)
errs = append(errs, fmt.Sprintf("%s(%v)", thisListType, err))
continue
}
errs = nil
manifestList = attemptedManifestList
break
}
if errs != nil {
return nil, fmt.Errorf("Uploading manifest list failed, attempted the following formats: %s", strings.Join(errs, ", "))
}
// Sign the manifest list.
newSigs, err := c.createSignatures(ctx, manifestList, options.SignIdentity)
if err != nil {
return nil, err
}
sigs = append(sigs, newSigs...)
c.Printf("Storing list signatures\n")
if err := c.dest.PutSignaturesWithFormat(ctx, sigs, nil); err != nil {
return nil, fmt.Errorf("writing signatures: %w", err)
}
return manifestList, nil
}
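
The loop above uses errs both to accumulate per-format failures and, when reset to nil, as the "upload succeeded" flag. A stand-alone sketch of that control flow (tryFormats and its parameters are illustrative, not part of this package):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// tryFormats attempts upload with each candidate in preference order; it
// assumes len(candidates) > 0, as the manifest-list loop above does.
func tryFormats(candidates []string, upload func(string) error) (string, error) {
	var errs []string
	chosen := ""
	for _, format := range candidates {
		if err := upload(format); err != nil {
			errs = append(errs, fmt.Sprintf("%s(%v)", format, err))
			continue
		}
		errs = nil // success: clear the accumulated failures
		chosen = format
		break
	}
	if errs != nil {
		return "", fmt.Errorf("upload failed, attempted the following formats: %s", strings.Join(errs, ", "))
	}
	return chosen, nil
}

func main() {
	got, err := tryFormats([]string{"typeA", "typeB"}, func(format string) error {
		if format == "typeA" {
			return errors.New("rejected")
		}
		return nil
	})
	fmt.Println(got, err) // typeB <nil>
}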

vendor/github.com/containers/image/v5/copy/single.go generated vendored Normal file

@ -0,0 +1,818 @@
package copy
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"reflect"
"strings"
"sync"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/image"
"github.com/containers/image/v5/internal/pkg/platform"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/pkg/compression"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/signature"
"github.com/containers/image/v5/transports"
"github.com/containers/image/v5/types"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
"github.com/vbauerster/mpb/v8"
"golang.org/x/exp/slices"
)
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
type imageCopier struct {
c *copier
manifestUpdates *types.ManifestUpdateOptions
src *image.SourcedImage
diffIDsAreNeeded bool
cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
canSubstituteBlobs bool
compressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user explicitly requested one, or nil.
compressionLevel *int
ociEncryptLayers *[]int
}
// copySingleImage copies a single (non-manifest-list) image unparsedImage, using policyContext to validate
// source image admissibility.
func (c *copier) copySingleImage(ctx context.Context, policyContext *signature.PolicyContext, options *Options, unparsedToplevel, unparsedImage *image.UnparsedImage, targetInstance *digest.Digest) (retManifest []byte, retManifestType string, retManifestDigest digest.Digest, retErr error) {
// The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list.
// Make sure we fail cleanly in such cases.
multiImage, err := isMultiImage(ctx, unparsedImage)
if err != nil {
// FIXME FIXME: How to name a reference for the sub-image?
return nil, "", "", fmt.Errorf("determining manifest MIME type for %s: %w", transports.ImageName(unparsedImage.Reference()), err)
}
if multiImage {
return nil, "", "", fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image")
}
// Please keep this policy check BEFORE reading any other information about the image.
// (The multiImage check above only matches the MIME type, which we have received anyway.
// Actual parsing of anything should be deferred.)
if allowed, err := policyContext.IsRunningImageAllowed(ctx, unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
return nil, "", "", fmt.Errorf("Source image rejected: %w", err)
}
src, err := image.FromUnparsedImage(ctx, options.SourceCtx, unparsedImage)
if err != nil {
return nil, "", "", fmt.Errorf("initializing image from source %s: %w", transports.ImageName(c.rawSource.Reference()), err)
}
// If the destination is a digested reference, make a note of that, determine what digest value we're
// expecting, and check that the source manifest matches it. If the source manifest doesn't, but it's
// one item from a manifest list that matches it, accept that as a match.
destIsDigestedReference := false
if named := c.dest.Reference().DockerReference(); named != nil {
if digested, ok := named.(reference.Digested); ok {
destIsDigestedReference = true
matches, err := manifest.MatchesDigest(src.ManifestBlob, digested.Digest())
if err != nil {
return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err)
}
if !matches {
manifestList, _, err := unparsedToplevel.Manifest(ctx)
if err != nil {
return nil, "", "", fmt.Errorf("reading manifest from source image: %w", err)
}
matches, err = manifest.MatchesDigest(manifestList, digested.Digest())
if err != nil {
return nil, "", "", fmt.Errorf("computing digest of source image's manifest: %w", err)
}
if !matches {
return nil, "", "", errors.New("Digest of source image's manifest would not match destination reference")
}
}
}
}
if err := checkImageDestinationForCurrentRuntime(ctx, options.DestinationCtx, src, c.dest); err != nil {
return nil, "", "", err
}
sigs, err := c.sourceSignatures(ctx, src, options,
"Getting image source signatures",
"Checking if image destination supports signatures")
if err != nil {
return nil, "", "", err
}
// Determine if we're allowed to modify the manifest.
// If we can, set to the empty string. If we can't, set to the reason why.
// Compare, and perhaps keep in sync with, the version in copyMultipleImages.
cannotModifyManifestReason := ""
if len(sigs) > 0 {
cannotModifyManifestReason = "Would invalidate signatures"
}
if destIsDigestedReference {
cannotModifyManifestReason = "Destination specifies a digest"
}
if options.PreserveDigests {
cannotModifyManifestReason = "Instructed to preserve digests"
}
ic := imageCopier{
c: c,
manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}},
src: src,
// diffIDsAreNeeded is computed later
cannotModifyManifestReason: cannotModifyManifestReason,
ociEncryptLayers: options.OciEncryptLayers,
}
if options.DestinationCtx != nil {
// Note that compressionFormat and compressionLevel can be nil.
ic.compressionFormat = options.DestinationCtx.CompressionFormat
ic.compressionLevel = options.DestinationCtx.CompressionLevel
}
// Decide whether we can substitute blobs with semantic equivalents:
// - Don't do that if we can't modify the manifest at all
// - Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
// This may be too conservative, but for now, better safe than sorry, _especially_ on the len(c.signers) != 0 path:
// The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
// We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there's a risk
// that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
// and we would reuse and sign it.
ic.canSubstituteBlobs = ic.cannotModifyManifestReason == "" && len(c.signers) == 0
if err := ic.updateEmbeddedDockerReference(); err != nil {
return nil, "", "", err
}
destRequiresOciEncryption := (isEncrypted(src) && ic.c.ociDecryptConfig != nil) || options.OciEncryptLayers != nil
manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{
srcMIMEType: ic.src.ManifestMIMEType,
destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(),
forceManifestMIMEType: options.ForceManifestMIMEType,
requiresOCIEncryption: destRequiresOciEncryption,
cannotModifyManifestReason: ic.cannotModifyManifestReason,
})
if err != nil {
return nil, "", "", err
}
// We set up this part of ic.manifestUpdates quite early, not just around the
// code that calls copyUpdatedConfigAndManifest, so that other parts of the copy code
// (e.g. the UpdatedImageNeedsLayerDiffIDs check just below) can make decisions based
// on the expected destination format.
if manifestConversionPlan.preferredMIMETypeNeedsConversion {
ic.manifestUpdates.ManifestMIMEType = manifestConversionPlan.preferredMIMEType
}
// If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here.
ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates)
// If enabled, fetch and compare the destination's manifest; as an optimization, skip updating the destination if the two are equal.
if options.OptimizeDestinationImageAlreadyExists {
shouldUpdateSigs := len(sigs) > 0 || len(c.signers) != 0 // TODO: Consider allowing signatures updates only and skipping the image's layers/manifest copy if possible
noPendingManifestUpdates := ic.noPendingManifestUpdates()
logrus.Debugf("Checking if we can skip copying: has signatures=%t, OCI encryption=%t, no manifest updates=%t", shouldUpdateSigs, destRequiresOciEncryption, noPendingManifestUpdates)
if !shouldUpdateSigs && !destRequiresOciEncryption && noPendingManifestUpdates {
isSrcDestManifestEqual, retManifest, retManifestType, retManifestDigest, err := compareImageDestinationManifestEqual(ctx, options, src, targetInstance, c.dest)
if err != nil {
logrus.Warnf("Failed to compare destination image manifest: %v", err)
return nil, "", "", err
}
if isSrcDestManifestEqual {
c.Printf("Skipping: image already present at destination\n")
return retManifest, retManifestType, retManifestDigest, nil
}
}
}
if err := ic.copyLayers(ctx); err != nil {
return nil, "", "", err
}
// With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
// and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
// without actually trying to upload something and getting a types.ManifestTypeRejectedError.
// So, try the preferred manifest MIME type with possibly-updated blob digests, media types, and sizes if
// we're altering how they're compressed. If the process succeeds, fine…
manifestBytes, retManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
retManifestType = manifestConversionPlan.preferredMIMEType
if err != nil {
logrus.Debugf("Writing manifest using preferred type %s failed: %v", manifestConversionPlan.preferredMIMEType, err)
// … if it fails, and the failure is either because the manifest is rejected by the registry, or
// because we failed to create a manifest of the specified type because the specific manifest type
// doesn't support the type of compression we're trying to use (e.g. docker v2s2 and zstd), we may
// have other options available that could still succeed.
var manifestTypeRejectedError types.ManifestTypeRejectedError
var manifestLayerCompressionIncompatibilityError manifest.ManifestLayerCompressionIncompatibilityError
isManifestRejected := errors.As(err, &manifestTypeRejectedError)
isCompressionIncompatible := errors.As(err, &manifestLayerCompressionIncompatibilityError)
if (!isManifestRejected && !isCompressionIncompatible) || len(manifestConversionPlan.otherMIMETypeCandidates) == 0 {
// We don't have other options.
// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
// Don't bother the user with MIME types if we have no choice.
return nil, "", "", err
}
// If the original MIME type is acceptable, determineManifestConversion always uses it as manifestConversionPlan.preferredMIMEType.
// So if we are here, we will definitely be trying to convert the manifest.
// With ic.cannotModifyManifestReason != "", that would just be a string of repeated failures for the same reason,
// so let's bail out early and with a better error message.
if ic.cannotModifyManifestReason != "" {
return nil, "", "", fmt.Errorf("writing manifest failed and we cannot try conversions: %q: %w", cannotModifyManifestReason, err)
}
// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
errs := []string{fmt.Sprintf("%s(%v)", manifestConversionPlan.preferredMIMEType, err)}
for _, manifestMIMEType := range manifestConversionPlan.otherMIMETypeCandidates {
logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
ic.manifestUpdates.ManifestMIMEType = manifestMIMEType
attemptedManifest, attemptedManifestDigest, err := ic.copyUpdatedConfigAndManifest(ctx, targetInstance)
if err != nil {
logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err)
errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err))
continue
}
// We have successfully uploaded a manifest.
manifestBytes = attemptedManifest
retManifestDigest = attemptedManifestDigest
retManifestType = manifestMIMEType
errs = nil // Mark this as a success so that we don't abort below.
break
}
if errs != nil {
return nil, "", "", fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
}
}
if targetInstance != nil {
targetInstance = &retManifestDigest
}
newSigs, err := c.createSignatures(ctx, manifestBytes, options.SignIdentity)
if err != nil {
return nil, "", "", err
}
sigs = append(sigs, newSigs...)
c.Printf("Storing signatures\n")
if err := c.dest.PutSignaturesWithFormat(ctx, sigs, targetInstance); err != nil {
return nil, "", "", fmt.Errorf("writing signatures: %w", err)
}
return manifestBytes, retManifestType, retManifestDigest, nil
}
// checkImageDestinationForCurrentRuntime enforces dest.MustMatchRuntimeOS, if necessary.
func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.SystemContext, src types.Image, dest types.ImageDestination) error {
if dest.MustMatchRuntimeOS() {
c, err := src.OCIConfig(ctx)
if err != nil {
return fmt.Errorf("parsing image configuration: %w", err)
}
wantedPlatforms, err := platform.WantedPlatforms(sys)
if err != nil {
return fmt.Errorf("getting current platform information %#v: %w", sys, err)
}
options := newOrderedSet()
match := false
for _, wantedPlatform := range wantedPlatforms {
// Waiting for https://github.com/opencontainers/image-spec/pull/777 :
// This currently can't use image.MatchesPlatform because we don't know what to use
// for image.Variant.
if wantedPlatform.OS == c.OS && wantedPlatform.Architecture == c.Architecture {
match = true
break
}
options.append(fmt.Sprintf("%s+%s", wantedPlatform.OS, wantedPlatform.Architecture))
}
if !match {
logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q, expecting one of %q",
c.OS, c.Architecture, strings.Join(options.list, ", "))
}
}
return nil
}
// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests.
func (ic *imageCopier) updateEmbeddedDockerReference() error {
if ic.c.dest.IgnoresEmbeddedDockerReference() {
return nil // Destination would prefer us not to update the embedded reference.
}
destRef := ic.c.dest.Reference().DockerReference()
if destRef == nil {
return nil // Destination does not care about Docker references
}
if !ic.src.EmbeddedDockerReferenceConflicts(destRef) {
return nil // No reference embedded in the manifest, or it matches destRef already.
}
if ic.cannotModifyManifestReason != "" {
return fmt.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would change the manifest, which we cannot do: %q",
transports.ImageName(ic.c.dest.Reference()), destRef.String(), ic.cannotModifyManifestReason)
}
ic.manifestUpdates.EmbeddedDockerReference = destRef
return nil
}
func (ic *imageCopier) noPendingManifestUpdates() bool {
return reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly})
}
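// The reflect.DeepEqual comparison above treats every field other than
// InformationOnly as "pending if non-zero". A miniature of the same trick,
// with illustrative names that are not part of this package:
//
//	type updates struct {
//		MIMEType string   // a pending update if non-empty
//		Layers   []string // a pending update if non-nil
//		Info     string   // informational only; never counts as pending
//	}
//
//	func noPending(u updates) bool {
//		return reflect.DeepEqual(u, updates{Info: u.Info})
//	}
//
//	noPending(updates{Info: "dest"})                   // true
//	noPending(updates{Info: "dest", MIMEType: "v2s2"}) // false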
// compareImageDestinationManifestEqual compares the `src` and `dest` image manifests (reading the manifest from the
// (possibly remote) destination). It returns true, along with the destination's manifest, type, and digest, if they compare equal.
func compareImageDestinationManifestEqual(ctx context.Context, options *Options, src *image.SourcedImage, targetInstance *digest.Digest, dest types.ImageDestination) (bool, []byte, string, digest.Digest, error) {
srcManifestDigest, err := manifest.Digest(src.ManifestBlob)
if err != nil {
return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err)
}
destImageSource, err := dest.Reference().NewImageSource(ctx, options.DestinationCtx)
if err != nil {
logrus.Debugf("Unable to create destination image %s source: %v", dest.Reference(), err)
return false, nil, "", "", nil
}
destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance)
if err != nil {
logrus.Debugf("Unable to get destination image %s/%s manifest: %v", destImageSource, targetInstance, err)
return false, nil, "", "", nil
}
destManifestDigest, err := manifest.Digest(destManifest)
if err != nil {
return false, nil, "", "", fmt.Errorf("calculating manifest digest: %w", err)
}
logrus.Debugf("Comparing source and destination manifest digests: %v vs. %v", srcManifestDigest, destManifestDigest)
if srcManifestDigest != destManifestDigest {
return false, nil, "", "", nil
}
// Destination and source manifests, types and digests should all be equivalent
return true, destManifest, destManifestType, destManifestDigest, nil
}
// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.cannotModifyManifestReason == "".
func (ic *imageCopier) copyLayers(ctx context.Context) error {
srcInfos := ic.src.LayerInfos()
numLayers := len(srcInfos)
updatedSrcInfos, err := ic.src.LayerInfosForCopy(ctx)
if err != nil {
return err
}
srcInfosUpdated := false
if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) {
if ic.cannotModifyManifestReason != "" {
return fmt.Errorf("Copying this image would require changing layer representation, which we cannot do: %q", ic.cannotModifyManifestReason)
}
srcInfos = updatedSrcInfos
srcInfosUpdated = true
}
type copyLayerData struct {
destInfo types.BlobInfo
diffID digest.Digest
err error
}
// The manifest is used to determine whether a given layer is empty.
man, err := manifest.FromBlob(ic.src.ManifestBlob, ic.src.ManifestMIMEType)
if err != nil {
return err
}
manifestLayerInfos := man.LayerInfos()
// copyGroup is used to determine if all layers are copied
copyGroup := sync.WaitGroup{}
data := make([]copyLayerData, numLayers)
copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) {
defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
defer copyGroup.Done()
cld := copyLayerData{}
if !ic.c.downloadForeignLayers && ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
// DiffIDs are currently needed only when converting from schema1, in which case
// src.LayerInfos will not have URLs, because schema1 does not support them.
if ic.diffIDsAreNeeded {
cld.err = errors.New("getting DiffID for foreign layers is unimplemented")
} else {
cld.destInfo = srcLayer
logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
}
} else {
cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index, srcRef, manifestLayerInfos[index].EmptyLayer)
}
data[index] = cld
}
// Decide which layers to encrypt
layersToEncrypt := set.New[int]()
var encryptAll bool
if ic.ociEncryptLayers != nil {
encryptAll = len(*ic.ociEncryptLayers) == 0
totalLayers := len(srcInfos)
for _, l := range *ic.ociEncryptLayers {
// If l is negative, it is a reverse index from the end of the layer list.
layersToEncrypt.Add((totalLayers + l) % totalLayers)
}
if encryptAll {
for i := 0; i < len(srcInfos); i++ {
layersToEncrypt.Add(i)
}
}
}
if err := func() error { // A scope for defer
progressPool := ic.c.newProgressPool()
defer progressPool.Wait()
// Ensure we wait for all layers to be copied. progressPool.Wait() must not be called while any of the copyLayerHelpers interact with the progressPool.
defer copyGroup.Wait()
for i, srcLayer := range srcInfos {
err = ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1)
if err != nil {
// This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
return fmt.Errorf("copying layer: %w", err)
}
copyGroup.Add(1)
go copyLayerHelper(i, srcLayer, layersToEncrypt.Contains(i), progressPool, ic.c.rawSource.Reference().DockerReference())
}
// A call to copyGroup.Wait() is done at this point by the defer above.
return nil
}(); err != nil {
return err
}
destInfos := make([]types.BlobInfo, numLayers)
diffIDs := make([]digest.Digest, numLayers)
for i, cld := range data {
if cld.err != nil {
return cld.err
}
destInfos[i] = cld.destInfo
diffIDs[i] = cld.diffID
}
// WARNING: If you are adding new reasons to change ic.manifestUpdates, also update the
// OptimizeDestinationImageAlreadyExists short-circuit conditions
ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
if ic.diffIDsAreNeeded {
ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
}
if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) {
ic.manifestUpdates.LayerInfos = destInfos
}
return nil
}
// layerDigestsDiffer returns true iff the digests in a and b differ (ignoring sizes and possible other fields)
func layerDigestsDiffer(a, b []types.BlobInfo) bool {
return !slices.EqualFunc(a, b, func(a, b types.BlobInfo) bool {
return a.Digest == b.Digest
})
}
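// slices.EqualFunc (from golang.org/x/exp/slices, imported above) returns
// false when the lengths differ, so layerDigestsDiffer also reports a
// difference for added or removed layers, not just changed digests.
// A miniature with illustrative values:
//
//	a := []types.BlobInfo{{Digest: "sha256:aaa", Size: 1}, {Digest: "sha256:bbb", Size: 2}}
//	b := []types.BlobInfo{{Digest: "sha256:aaa", Size: 10}, {Digest: "sha256:bbb", Size: 20}}
//	layerDigestsDiffer(a, b)     // false: sizes differ, digests match
//	layerDigestsDiffer(a, b[:1]) // true: a length mismatch counts as differing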
// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary,
// stores the resulting config and manifest to the destination, and returns the stored manifest
// and its digest.
func (ic *imageCopier) copyUpdatedConfigAndManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, digest.Digest, error) {
var pendingImage types.Image = ic.src
if !ic.noPendingManifestUpdates() {
if ic.cannotModifyManifestReason != "" {
return nil, "", fmt.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden: %q", ic.cannotModifyManifestReason)
}
if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) {
// We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion.
// So, this can only happen if we are trying to upload using one of the other MIME type candidates.
// Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise
// when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2.
// Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now.
// If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates.
return nil, "", fmt.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
}
pi, err := ic.src.UpdatedImage(ctx, *ic.manifestUpdates)
if err != nil {
return nil, "", fmt.Errorf("creating an updated image manifest: %w", err)
}
pendingImage = pi
}
man, _, err := pendingImage.Manifest(ctx)
if err != nil {
return nil, "", fmt.Errorf("reading manifest: %w", err)
}
if err := ic.copyConfig(ctx, pendingImage); err != nil {
return nil, "", err
}
ic.c.Printf("Writing manifest to image destination\n")
manifestDigest, err := manifest.Digest(man)
if err != nil {
return nil, "", err
}
if instanceDigest != nil {
instanceDigest = &manifestDigest
}
if err := ic.c.dest.PutManifest(ctx, man, instanceDigest); err != nil {
logrus.Debugf("Error %v while writing manifest %q", err, string(man))
return nil, "", fmt.Errorf("writing manifest: %w", err)
}
return man, manifestDigest, nil
}
// copyConfig copies config.json, if any, from src to dest.
func (ic *imageCopier) copyConfig(ctx context.Context, src types.Image) error {
srcInfo := src.ConfigInfo()
if srcInfo.Digest != "" {
if err := ic.c.concurrentBlobCopiesSemaphore.Acquire(ctx, 1); err != nil {
// This can only fail with ctx.Err(), so no need to blame acquiring the semaphore.
return fmt.Errorf("copying config: %w", err)
}
defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
destInfo, err := func() (types.BlobInfo, error) { // A scope for defer
progressPool := ic.c.newProgressPool()
defer progressPool.Wait()
bar := ic.c.createProgressBar(progressPool, false, srcInfo, "config", "done")
defer bar.Abort(false)
ic.c.printCopyInfo("config", srcInfo)
configBlob, err := src.ConfigBlob(ctx)
if err != nil {
return types.BlobInfo{}, fmt.Errorf("reading config blob %s: %w", srcInfo.Digest, err)
}
destInfo, err := ic.copyBlobFromStream(ctx, bytes.NewReader(configBlob), srcInfo, nil, true, false, bar, -1, false)
if err != nil {
return types.BlobInfo{}, err
}
bar.mark100PercentComplete()
return destInfo, nil
}()
if err != nil {
return err
}
if destInfo.Digest != srcInfo.Digest {
return fmt.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
}
}
return nil
}
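// The inline func() { ... }() wrapper used above (and throughout this file as
// "a scope for defer") makes deferred cleanups such as bar.Abort and semaphore
// releases run at the end of the step rather than at the end of the whole
// function. A miniature of the scoping effect:
//
//	for i := 0; i < 2; i++ {
//		func() {
//			defer fmt.Println("cleanup", i)
//			fmt.Println("work", i)
//		}()
//	}
//	// Prints: work 0, cleanup 0, work 1, cleanup 1; the cleanups are not
//	// postponed until the surrounding function returns.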
// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine.
// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation.
type diffIDResult struct {
digest digest.Digest
err error
}
// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded.
// srcRef can be used as an additional hint to the destination when checking whether a layer can be reused; srcRef may be nil.
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named, emptyLayer bool) (types.BlobInfo, digest.Digest, error) {
// If the srcInfo doesn't contain compression information, try to compute it from the
// MediaType, which was either read from a manifest by way of LayerInfos() or constructed
// by LayerInfosForCopy(), if it was supplied at all. If we succeed in copying the blob,
// the BlobInfo we return will be passed to UpdatedImage() and then to UpdateLayerInfos(),
// which uses the compression information to compute the updated MediaType values.
// (Sadly UpdatedImage() is documented to not update MediaTypes from
// ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
//
// This MIME type → compression mapping belongs in manifest-specific code in our manifest
// package (but we should preferably replace/change UpdatedImage instead of productizing
// this workaround).
if srcInfo.CompressionAlgorithm == nil {
switch srcInfo.MediaType {
case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
srcInfo.CompressionAlgorithm = &compression.Gzip
case imgspecv1.MediaTypeImageLayerZstd:
srcInfo.CompressionAlgorithm = &compression.Zstd
}
}
ic.c.printCopyInfo("blob", srcInfo)
cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
// When encrypting or decrypting, only use the simple code path. We might be able to optimize more
// (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again),
// but it's not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let's not.
encryptingOrDecrypting := toEncrypt || (isOciEncrypted(srcInfo.MediaType) && ic.c.ociDecryptConfig != nil)
canAvoidProcessingCompleteLayer := !diffIDIsNeeded && !encryptingOrDecrypting
// Don't read the layer from the source if we already have the blob, and optimizations are acceptable.
if canAvoidProcessingCompleteLayer {
canChangeLayerCompression := ic.src.CanChangeLayerCompression(srcInfo.MediaType)
logrus.Debugf("Checking if we can reuse blob %s: general substitution = %v, compression for MIME type %q = %v",
srcInfo.Digest, ic.canSubstituteBlobs, srcInfo.MediaType, canChangeLayerCompression)
canSubstitute := ic.canSubstituteBlobs && ic.src.CanChangeLayerCompression(srcInfo.MediaType)
// TODO: at this point we don't know whether or not a blob we end up reusing is compressed using an algorithm
// that is acceptable for use on layers in the manifest that we'll be writing later, so if we end up reusing
// a blob that's compressed with e.g. zstd, but we're only allowed to write a v2s2 manifest, this will cause
// a failure when we eventually try to update the manifest with the digest and MIME type of the reused blob.
// Fixing that will probably require passing more information to TryReusingBlob() than the current version of
// the ImageDestination interface lets us pass in.
reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
Cache: ic.c.blobInfoCache,
CanSubstitute: canSubstitute,
EmptyLayer: emptyLayer,
LayerIndex: &layerIndex,
SrcRef: srcRef,
})
if err != nil {
return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
}
if reused {
logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
func() { // A scope for defer
bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", "skipped: already exists")
defer bar.Abort(false)
bar.mark100PercentComplete()
}()
// Throw an event that the layer has been skipped
if ic.c.progress != nil && ic.c.progressInterval > 0 {
ic.c.progress <- types.ProgressProperties{
Event: types.ProgressEventSkipped,
Artifact: srcInfo,
}
}
return updatedBlobInfoFromReuse(srcInfo, reusedBlob), cachedDiffID, nil
}
}
// A partial pull is managed by the destination storage, which decides what portions
// of the source file are not known yet and must be fetched.
// Attempt a partial pull only when the source allows retrieving a blob partially and
// the destination has support for it.
if canAvoidProcessingCompleteLayer && ic.c.rawSource.SupportsGetBlobAt() && ic.c.dest.SupportsPutBlobPartial() {
if reused, blobInfo := func() (bool, types.BlobInfo) { // A scope for defer
bar := ic.c.createProgressBar(pool, true, srcInfo, "blob", "done")
hideProgressBar := true
defer func() { // Note that this is not the same as defer bar.Abort(hideProgressBar); we need hideProgressBar to be evaluated lazily.
bar.Abort(hideProgressBar)
}()
proxy := blobChunkAccessorProxy{
wrapped: ic.c.rawSource,
bar: bar,
}
uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
if err == nil {
if srcInfo.Size != -1 {
bar.SetRefill(srcInfo.Size - bar.Current())
}
bar.mark100PercentComplete()
hideProgressBar = false
logrus.Debugf("Retrieved partial blob %v", srcInfo.Digest)
return true, updatedBlobInfoFromUpload(srcInfo, uploadedBlob)
}
logrus.Debugf("Failed to retrieve partial blob: %v", err)
return false, types.BlobInfo{}
}(); reused {
return blobInfo, cachedDiffID, nil
}
}
// Fallback: copy the layer, computing the diffID if we need to do so
return func() (types.BlobInfo, digest.Digest, error) { // A scope for defer
bar := ic.c.createProgressBar(pool, false, srcInfo, "blob", "done")
defer bar.Abort(false)
srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
if err != nil {
return types.BlobInfo{}, "", fmt.Errorf("reading blob %s: %w", srcInfo.Digest, err)
}
defer srcStream.Close()
blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer)
if err != nil {
return types.BlobInfo{}, "", err
}
diffID := cachedDiffID
if diffIDIsNeeded {
select {
case <-ctx.Done():
return types.BlobInfo{}, "", ctx.Err()
case diffIDResult := <-diffIDChan:
if diffIDResult.err != nil {
return types.BlobInfo{}, "", fmt.Errorf("computing layer DiffID: %w", diffIDResult.err)
}
logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
// Don't record any associations that involve encrypted data. This is a bit crude,
// some blob substitutions (replacing pulls of encrypted data with local reuse of known decryption outcomes)
// might be safe, but it's not trivially obvious, so let's be conservative for now.
// This crude approach also means we don't need to record whether a blob is encrypted
// in the blob info cache (which would probably be necessary for any more complex logic),
// and the simplicity is attractive.
if !encryptingOrDecrypting {
// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
}
diffID = diffIDResult.digest
}
}
bar.mark100PercentComplete()
return blobInfo, diffID, nil
}()
}
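// The wait on diffIDChan above pairs a buffered result channel with ctx.Done()
// so the copy stays cancellable while the digesting goroutine can always
// complete its send. A miniature of that select pattern, with computeSomething
// as an illustrative stand-in:
//
//	ch := make(chan diffIDResult, 1) // buffered: the send never blocks
//	go func() { ch <- computeSomething() }()
//	select {
//	case <-ctx.Done():
//		return ctx.Err()
//	case r := <-ch:
//		return r.err
//	}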
// updatedBlobInfoFromReuse returns inputInfo updated with reusedBlob which was created based on inputInfo.
func updatedBlobInfoFromReuse(inputInfo types.BlobInfo, reusedBlob private.ReusedBlob) types.BlobInfo {
// The transport is only tasked with finding the blob, determining its size if necessary, and returning the right
// compression format if the blob was substituted.
// Handling of compression, encryption, and the related MIME types and the like are all the responsibility
// of the generic code in this package.
res := types.BlobInfo{
Digest: reusedBlob.Digest,
Size: reusedBlob.Size,
URLs: nil, // This _must_ be cleared if Digest changes; clear it in other cases as well, to preserve previous behavior.
Annotations: inputInfo.Annotations,
MediaType: inputInfo.MediaType, // Mostly irrelevant, MediaType is updated based on Compression*/CryptoOperation.
CompressionOperation: reusedBlob.CompressionOperation,
CompressionAlgorithm: reusedBlob.CompressionAlgorithm,
CryptoOperation: inputInfo.CryptoOperation, // Expected to be unset anyway.
}
// The transport is only expected to fill CompressionOperation and CompressionAlgorithm
// if the blob was substituted; otherwise, fill it in based
// on what we know from the srcInfos we were given.
if reusedBlob.Digest == inputInfo.Digest {
res.CompressionOperation = inputInfo.CompressionOperation
res.CompressionAlgorithm = inputInfo.CompressionAlgorithm
}
return res
}
// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
// it copies a blob with srcInfo (with known Digest and Annotations and possibly known Size) from srcStream to dest,
// perhaps (de/re/)compressing the stream,
// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
func (ic *imageCopier) copyLayerFromStream(ctx context.Context, srcStream io.Reader, srcInfo types.BlobInfo,
diffIDIsNeeded bool, toEncrypt bool, bar *progressBar, layerIndex int, emptyLayer bool) (types.BlobInfo, <-chan diffIDResult, error) {
var getDiffIDRecorder func(compressiontypes.DecompressorFunc) io.Writer // = nil
var diffIDChan chan diffIDResult
err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithbelow
if diffIDIsNeeded {
diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
pipeReader, pipeWriter := io.Pipe()
defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily.
_ = pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close(), always returns nil
}()
getDiffIDRecorder = func(decompressor compressiontypes.DecompressorFunc) io.Writer {
// If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
// reading from the pipe has failed, we don't really care.
// We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
// the return value includes an error indication, which we do check.
//
// If this gets never called, pipeReader will not be used anywhere, but pipeWriter will only be
// closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader
return pipeWriter
}
}
blobInfo, err := ic.copyBlobFromStream(ctx, srcStream, srcInfo, getDiffIDRecorder, false, toEncrypt, bar, layerIndex, emptyLayer) // Sets err to nil on success
return blobInfo, diffIDChan, err
// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
}
// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest.
func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compressiontypes.DecompressorFunc) {
result := diffIDResult{
digest: "",
err: errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
}
defer func() { dest <- result }()
defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead.
result.digest, result.err = computeDiffID(layerStream, decompressor)
}
// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
func computeDiffID(stream io.Reader, decompressor compressiontypes.DecompressorFunc) (digest.Digest, error) {
if decompressor != nil {
s, err := decompressor(stream)
if err != nil {
return "", err
}
defer s.Close()
stream = s
}
return digest.Canonical.FromReader(stream)
}
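
To make the DiffID contract concrete: for a gzip-compressed layer, the DiffID is the digest of the decompressed bytes, which is what computeDiffID produces when handed a decompressor. A small sketch using the same go-digest API (the payload is illustrative):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	payload := []byte("uncompressed layer tarball bytes")

	// Store the payload gzip-compressed, as a registry would.
	var compressed bytes.Buffer
	zw := gzip.NewWriter(&compressed)
	_, _ = zw.Write(payload)
	_ = zw.Close()

	// The DiffID is the digest of the decompressed stream.
	zr, err := gzip.NewReader(&compressed)
	if err != nil {
		panic(err)
	}
	defer zr.Close()
	diffID, err := digest.Canonical.FromReader(zr)
	if err != nil {
		panic(err)
	}
	fmt.Println(diffID == digest.Canonical.FromBytes(payload)) // true
}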


@ -132,11 +132,11 @@ func (d *dirImageDestination) Close() error {
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
blobFile, err := os.CreateTemp(d.ref.path, "dir-put-blob")
if err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
succeeded := false
explicitClosed := false
@ -153,14 +153,14 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
size, err := io.Copy(blobFile, stream)
if err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
@ -169,7 +169,7 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
// always fails on Windows.
if runtime.GOOS != "windows" {
if err := blobFile.Chmod(0644); err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
}
@ -178,32 +178,30 @@ func (d *dirImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
blobFile.Close()
explicitClosed = true
if err := os.Rename(blobFile.Name(), blobPath); err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
succeeded = true
return types.BlobInfo{Digest: blobDigest, Size: size}, nil
return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
func (d *dirImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if info.Digest == "" {
return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with unknown digest")
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with unknown digest")
}
blobPath := d.ref.layerPath(info.Digest)
finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) {
return false, types.BlobInfo{}, nil
return false, private.ReusedBlob{}, nil
}
if err != nil {
return false, types.BlobInfo{}, err
return false, private.ReusedBlob{}, err
}
return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil
}
// PutManifest writes manifest to the destination.


@ -69,6 +69,7 @@ func newImageDestination(ctx context.Context, sys *types.SystemContext, ref daem
// imageLoadGoroutine accepts tar stream on reader, sends it to c, and reports error or success by writing to statusChannel
func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeReader, statusChannel chan<- error) {
defer c.Close()
err := errors.New("Internal error: unexpected panic in imageLoadGoroutine")
defer func() {
logrus.Debugf("docker-daemon: sending done, status %v", err)


@ -28,6 +28,8 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref daemonRef
if err != nil {
return nil, fmt.Errorf("initializing docker engine client: %w", err)
}
defer c.Close()
// Per NewReference(), ref.StringWithinTransport() is either an image ID (config digest), or a !reference.NameOnly() reference.
// Either way ImageSave should create a tarball with exactly one image.
inputStream, err := c.ImageSave(ctx, []string{ref.StringWithinTransport()})


@ -213,6 +213,7 @@ func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {
// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
// signatureBase is always set in the return value
// The caller must call .Close() on the returned client when done.
func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, registryConfig *registryConfiguration, write bool, actions string) (*dockerClient, error) {
auth, err := config.GetCredentialsForRef(sys, ref.ref)
if err != nil {
@ -247,6 +248,7 @@ func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, regis
// (e.g., "registry.com[:5000][/some/namespace]/repo").
// Please note that newDockerClient does not set all members of dockerClient
// (e.g., username and password); those must be set by callers if necessary.
// The caller must call .Close() on the returned client when done.
func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) {
hostName := registry
if registry == dockerHostname {
@ -302,6 +304,7 @@ func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password
if err != nil {
return fmt.Errorf("creating new docker client: %w", err)
}
defer client.Close()
client.auth = types.DockerAuthConfig{
Username: username,
Password: password,
@ -371,6 +374,7 @@ func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, ima
if err != nil {
return nil, fmt.Errorf("creating new docker client: %w", err)
}
defer client.Close()
client.auth = auth
if sys != nil {
client.registryToken = sys.DockerBearerRegistryToken
@ -1084,3 +1088,11 @@ func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerRe
func sigstoreAttachmentTag(d digest.Digest) string {
return strings.Replace(d.String(), ":", "-", 1) + ".sig"
}
// Close removes resources associated with an initialized dockerClient, if any.
func (c *dockerClient) Close() error {
if c.client != nil {
c.client.CloseIdleConnections()
}
return nil
}


@ -68,6 +68,7 @@ func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.
if err != nil {
return nil, fmt.Errorf("failed to create client: %w", err)
}
defer client.Close()
tags := make([]string, 0)
@ -136,6 +137,7 @@ func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageRef
if err != nil {
return "", fmt.Errorf("failed to create client: %w", err)
}
defer client.Close()
path := fmt.Sprintf(manifestPath, reference.Path(dr.ref), tagOrDigest)
headers := map[string][]string{


@ -93,7 +93,7 @@ func (d *dockerImageDestination) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageDestination, if any.
func (d *dockerImageDestination) Close() error {
return nil
return d.c.Close()
}
// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
@ -132,8 +132,8 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
// If requested, precompute the blob digest to prevent uploading layers that already exist on the registry.
// This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests,
// the source blob is uncompressed, and the destination blob is being compressed "on the fly".
@ -141,7 +141,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref))
streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo)
if err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
defer cleanup()
stream = streamCopy
@ -152,10 +152,10 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, options.Cache)
if err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
if haveBlob {
return reusedInfo, nil
return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil
}
}
@ -164,16 +164,16 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
logrus.Debugf("Uploading %s", uploadPath)
res, err := d.c.makeRequest(ctx, http.MethodPost, uploadPath, nil, nil, v2Auth, nil)
if err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusAccepted {
logrus.Debugf("Error initiating layer upload, response %#v", *res)
return types.BlobInfo{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res))
return private.UploadedBlob{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res))
}
uploadLocation, err := res.Location()
if err != nil {
return types.BlobInfo{}, fmt.Errorf("determining upload URL: %w", err)
return private.UploadedBlob{}, fmt.Errorf("determining upload URL: %w", err)
}
digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo)
@ -201,7 +201,7 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
return uploadLocation, nil
}()
if err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
blobDigest := digester.Digest()
@ -212,17 +212,17 @@ func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream
uploadLocation.RawQuery = locationQuery.Encode()
res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil)
if err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
defer res.Body.Close()
if res.StatusCode != http.StatusCreated {
logrus.Debugf("Error uploading layer, response %#v", *res)
return types.BlobInfo{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res))
return private.UploadedBlob{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res))
}
logrus.Debugf("Upload of layer %s complete", blobDigest)
options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref))
return types.BlobInfo{Digest: blobDigest, Size: sizeCounter.size}, nil
return private.UploadedBlob{Digest: blobDigest, Size: sizeCounter.size}, nil
}
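
For orientation, the upload above follows the standard two-step registry push: a POST to /v2/<name>/blobs/uploads/ yields 202 Accepted plus a Location header, then a PUT of the data to that location with a digest query parameter yields 201 Created. A compressed sketch under those assumptions (pushBlob, its parameters, and the unauthenticated default HTTP client are placeholders; the real code streams, digests, and authenticates):

package main

import (
    "bytes"
    "fmt"
    "net/http"
)

// pushBlob sketches the two-step upload: POST to initiate (202 Accepted + Location),
// then PUT the data to that location with ?digest=... (201 Created).
func pushBlob(registry, repo, dgst string, payload []byte) error {
    res, err := http.Post(fmt.Sprintf("https://%s/v2/%s/blobs/uploads/", registry, repo), "", nil)
    if err != nil {
        return err
    }
    defer res.Body.Close()
    if res.StatusCode != http.StatusAccepted {
        return fmt.Errorf("initiating upload: unexpected status %s", res.Status)
    }
    loc, err := res.Location() // upload URL assigned by the registry
    if err != nil {
        return fmt.Errorf("determining upload URL: %w", err)
    }
    q := loc.Query()
    q.Set("digest", dgst)
    loc.RawQuery = q.Encode()
    req, err := http.NewRequest(http.MethodPut, loc.String(), bytes.NewReader(payload))
    if err != nil {
        return err
    }
    req.Header.Set("Content-Type", "application/octet-stream")
    res2, err := http.DefaultClient.Do(req)
    if err != nil {
        return err
    }
    defer res2.Body.Close()
    if res2.StatusCode != http.StatusCreated {
        return fmt.Errorf("uploading blob: unexpected status %s", res2.Status)
    }
    return nil
}

func main() { _ = pushBlob } // compile-only sketch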
// blobExists returns true iff repo contains a blob with digest, and if so, also its size.
@ -299,34 +299,32 @@ func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo referenc
// tryReusingExactBlob is a subset of TryReusingBlob which _only_ looks for exactly the specified
// blob in the current repository, with no cross-repo reuse or mounting; cache may be updated, it is not read.
// The caller must ensure info.Digest is set.
func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, types.BlobInfo, error) {
func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, private.ReusedBlob, error) {
exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil)
if err != nil {
return false, types.BlobInfo{}, err
return false, private.ReusedBlob{}, err
}
if exists {
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
return true, types.BlobInfo{Digest: info.Digest, MediaType: info.MediaType, Size: size}, nil
return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil
}
return false, types.BlobInfo{}, nil
return false, private.ReusedBlob{}, nil
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if info.Digest == "" {
return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
}
// First, check whether the blob happens to already exist at the destination.
haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache)
if err != nil {
return false, types.BlobInfo{}, err
return false, private.ReusedBlob{}, err
}
if haveBlob {
return true, reusedInfo, nil
@ -396,10 +394,14 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context,
continue
}
return true, types.BlobInfo{Digest: candidate.Digest, MediaType: info.MediaType, Size: size, CompressionOperation: compressionOperation, CompressionAlgorithm: compressionAlgorithm}, nil
return true, private.ReusedBlob{
Digest: candidate.Digest,
Size: size,
CompressionOperation: compressionOperation,
CompressionAlgorithm: compressionAlgorithm}, nil
}
return false, types.BlobInfo{}, nil
return false, private.ReusedBlob{}, nil
}
// PutManifest writes manifest to the destination.

View File

@ -153,6 +153,7 @@ func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logica
s.Compat = impl.AddCompat(s)
if err := s.ensureManifestIsLoaded(ctx); err != nil {
client.Close()
return nil, err
}
return s, nil
@ -166,7 +167,7 @@ func (s *dockerImageSource) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageSource, if any.
func (s *dockerImageSource) Close() error {
return nil
return s.c.Close()
}
// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
@ -605,6 +606,7 @@ func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerRefere
if err != nil {
return err
}
defer c.Close()
headers := map[string][]string{
"Accept": manifest.DefaultRequestedManifestMIMETypes,

View File

@ -76,15 +76,15 @@ func (d *Destination) AddRepoTags(tags []reference.NamedTagged) {
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
// Ouch, we need to stream the blob into a temporary file just to determine the size.
// When the layer is decompressed, we also have to generate the digest on uncompressed data.
if inputInfo.Size == -1 || inputInfo.Digest == "" {
logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.sysCtx, stream, &inputInfo)
if err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
defer cleanup()
stream = streamCopy
@ -92,47 +92,45 @@ func (d *Destination) PutBlobWithOptions(ctx context.Context, stream io.Reader,
}
if err := d.archive.lock(); err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
defer d.archive.unlock()
// Maybe the blob has already been sent
ok, reusedInfo, err := d.archive.tryReusingBlobLocked(inputInfo)
if err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
if ok {
return reusedInfo, nil
return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil
}
if options.IsConfig {
buf, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
if err != nil {
return types.BlobInfo{}, fmt.Errorf("reading Config file stream: %w", err)
return private.UploadedBlob{}, fmt.Errorf("reading Config file stream: %w", err)
}
d.config = buf
if err := d.archive.sendFileLocked(d.archive.configPath(inputInfo.Digest), inputInfo.Size, bytes.NewReader(buf)); err != nil {
return types.BlobInfo{}, fmt.Errorf("writing Config file: %w", err)
return private.UploadedBlob{}, fmt.Errorf("writing Config file: %w", err)
}
} else {
if err := d.archive.sendFileLocked(d.archive.physicalLayerPath(inputInfo.Digest), inputInfo.Size, stream); err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
}
d.archive.recordBlobLocked(types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size})
return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
return private.UploadedBlob{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
func (d *Destination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if err := d.archive.lock(); err != nil {
return false, types.BlobInfo{}, err
return false, private.ReusedBlob{}, err
}
defer d.archive.unlock()

View File

@ -13,6 +13,7 @@ import (
"time"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/internal/set"
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/types"
@ -69,17 +70,17 @@ func (w *Writer) unlock() {
// tryReusingBlobLocked checks whether the transport already contains a blob, and if so, returns its metadata.
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, tryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// The caller must have locked the Writer.
func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, types.BlobInfo, error) {
func (w *Writer) tryReusingBlobLocked(info types.BlobInfo) (bool, private.ReusedBlob, error) {
if info.Digest == "" {
return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
}
if blob, ok := w.blobs[info.Digest]; ok {
return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
return true, private.ReusedBlob{Digest: info.Digest, Size: blob.Size}, nil
}
return false, types.BlobInfo{}, nil
return false, private.ReusedBlob{}, nil
}
// recordBlob records metadata of a recorded blob, which must contain at least a digest and size.

View File

@ -43,10 +43,17 @@ func AddCompat(dest private.ImageDestinationInternalOnly) Compat {
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
return c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{
res, err := c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{
Cache: blobinfocache.FromBlobInfoCache(cache),
IsConfig: isConfig,
})
if err != nil {
return types.BlobInfo{}, err
}
return types.BlobInfo{
Digest: res.Digest,
Size: res.Size,
}, nil
}
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
@ -59,10 +66,26 @@ func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (c *Compat) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
return c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{
reused, blob, err := c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{
Cache: blobinfocache.FromBlobInfoCache(cache),
CanSubstitute: canSubstitute,
})
if !reused || err != nil {
return reused, types.BlobInfo{}, err
}
res := types.BlobInfo{
Digest: blob.Digest,
Size: blob.Size,
CompressionOperation: blob.CompressionOperation,
CompressionAlgorithm: blob.CompressionAlgorithm,
}
// This is probably not necessary; we preserve MediaType to decrease the risk of breaking external callers.
// Some transports were not setting the MediaType field anyway, and others were setting the old value on substitution;
// provide the value in cases where it is likely to be correct.
if blob.Digest == info.Digest {
res.MediaType = info.MediaType
}
return true, res, nil
}
// PutSignatures writes a set of signatures to the destination.

View File

@ -39,8 +39,8 @@ func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool {
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
return types.BlobInfo{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
}
// ImplementsPutBlobPartial implements SupportsPutBlobPartial() that returns true.

View File

@ -46,20 +46,34 @@ func FromPublic(dest types.ImageDestination) private.ImageDestination {
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
return w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig)
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
func (w *wrapped) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
res, err := w.PutBlob(ctx, stream, inputInfo, options.Cache, options.IsConfig)
if err != nil {
return private.UploadedBlob{}, err
}
return private.UploadedBlob{
Digest: res.Digest,
Size: res.Size,
}, nil
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
return w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
func (w *wrapped) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
reused, blob, err := w.TryReusingBlob(ctx, info, options.Cache, options.CanSubstitute)
if !reused || err != nil {
return reused, private.ReusedBlob{}, err
}
return true, private.ReusedBlob{
Digest: blob.Digest,
Size: blob.Size,
CompressionOperation: blob.CompressionOperation,
CompressionAlgorithm: blob.CompressionAlgorithm,
}, nil
}
// PutSignaturesWithFormat writes a set of signatures to the destination.

View File

@ -90,6 +90,11 @@ func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error {
return nil
}
func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
// ChooseInstanceByCompression is the same as ChooseInstance for a schema2 manifest list.
return list.ChooseInstance(ctx)
}
// ChooseInstance parses blob as a schema2 manifest list, and returns the digest
// of the image which is appropriate for the current environment.
func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {

View File

@ -51,6 +51,10 @@ type List interface {
ListPublic
// CloneInternal returns a deep copy of this list and its contents.
CloneInternal() List
// ChooseInstanceByCompression selects which manifest is most appropriate for the platform and compression described by the
// SystemContext (or for the current platform if the SystemContext doesn't specify any detail), and by preferGzip: when
// OptionalBoolTrue, gzip-compressed instances are preferred; when OptionalBoolFalse or left OptionalBoolUndefined, the best available compression is chosen.
ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error)
}
// ListUpdate includes the fields which a List's UpdateInstances() method will modify.

View File

@ -3,6 +3,7 @@ package manifest
import (
"encoding/json"
"fmt"
"math"
"runtime"
platform "github.com/containers/image/v5/internal/pkg/platform"
@ -14,6 +15,16 @@ import (
"golang.org/x/exp/slices"
)
const (
// OCI1InstanceAnnotationCompressionZSTD is an annotation name that can be placed on a manifest descriptor in an OCI index.
// The value of the annotation must be the string "true".
// If this annotation is present on a manifest, consuming that image instance requires support for Zstd compression.
// That also suggests that this instance benefits from
// Zstd compression, so it can be preferred by compatible consumers over instances that
// use gzip, depending on their local policy.
OCI1InstanceAnnotationCompressionZSTD = "io.github.containers.compression.zstd"
)
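
As an illustration, a producer advertises a zstd instance by setting this annotation on the manifest descriptor inside the index. A sketch with the OCI image-spec types (digest and size are placeholder values):

package main

import (
    "fmt"

    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
    // A descriptor advertising a zstd-compressed instance via the annotation above.
    // Digest and size are placeholder values.
    desc := imgspecv1.Descriptor{
        MediaType: imgspecv1.MediaTypeImageManifest,
        Digest:    "sha256:0000000000000000000000000000000000000000000000000000000000000000",
        Size:      1234,
        Annotations: map[string]string{
            "io.github.containers.compression.zstd": "true",
        },
    }
    fmt.Println(desc.Annotations)
}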
// OCI1IndexPublic is just an alias for the OCI index type, but one which we can
// provide methods for.
// This is publicly visible as c/image/manifest.OCI1Index
@ -73,39 +84,92 @@ func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error {
return nil
}
// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest
// of the image which is appropriate for the current environment.
func (index *OCI1IndexPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
// instanceIsZstd returns true if the instance is a zstd instance, otherwise false.
func instanceIsZstd(manifest imgspecv1.Descriptor) bool {
if value, ok := manifest.Annotations[OCI1InstanceAnnotationCompressionZSTD]; ok && value == "true" {
return true
}
return false
}
type instanceCandidate struct {
platformIndex int // Index of the candidate in platform.WantedPlatforms: lower numbers are preferred; or math.MaxInt if the candidate doesn't have a platform
isZstd bool // true if this instance uses zstd compression
manifestPosition int // A zero-based index of the instance in the manifest list
digest digest.Digest // Instance digest
}
func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip bool) bool {
switch {
case ic.platformIndex != other.platformIndex:
return ic.platformIndex < other.platformIndex
case ic.isZstd != other.isZstd:
if !preferGzip {
return ic.isZstd
} else {
return !ic.isZstd
}
case ic.manifestPosition != other.manifestPosition:
return ic.manifestPosition < other.manifestPosition
}
panic("internal error: invalid comparision between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition.
}
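
To make the precedence concrete: platform rank wins first, then compression preference, then list position as the tiebreaker. A standalone sketch of the same ordering with simplified local types (not the ones above):

package main

import (
    "fmt"
    "math"
)

// cand is a simplified copy of the candidate fields, for illustration only.
type cand struct {
    platformIndex    int
    isZstd           bool
    manifestPosition int
}

// preferred mirrors the ordering above: platform rank, then compression
// preference, then position in the manifest list as the final tiebreaker.
func preferred(a, b cand, preferGzip bool) bool {
    switch {
    case a.platformIndex != b.platformIndex:
        return a.platformIndex < b.platformIndex
    case a.isZstd != b.isZstd:
        if preferGzip {
            return !a.isZstd
        }
        return a.isZstd
    }
    return a.manifestPosition < b.manifestPosition
}

func main() {
    gzipMatch := cand{platformIndex: 0, isZstd: false, manifestPosition: 0}
    zstdMatch := cand{platformIndex: 0, isZstd: true, manifestPosition: 1}
    noPlatform := cand{platformIndex: math.MaxInt, isZstd: false, manifestPosition: 2}

    fmt.Println(preferred(zstdMatch, gzipMatch, false))  // true: zstd wins by default
    fmt.Println(preferred(gzipMatch, zstdMatch, true))   // true: gzip wins when preferGzip is set
    fmt.Println(preferred(gzipMatch, noPlatform, false)) // true: a platform match beats none
}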
// chooseInstance is a private equivalent to ChooseInstanceByCompression,
// shared by ChooseInstance and ChooseInstanceByCompression.
func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
didPreferGzip := false
if preferGzip == types.OptionalBoolTrue {
didPreferGzip = true
}
wantedPlatforms, err := platform.WantedPlatforms(ctx)
if err != nil {
return "", fmt.Errorf("getting platform information %#v: %w", ctx, err)
}
for _, wantedPlatform := range wantedPlatforms {
for _, d := range index.Manifests {
if d.Platform == nil {
var bestMatch *instanceCandidate
for manifestIndex, d := range index.Manifests {
candidate := instanceCandidate{platformIndex: math.MaxInt, manifestPosition: manifestIndex, isZstd: instanceIsZstd(d), digest: d.Digest}
if d.Platform != nil {
foundPlatform := false
for platformIndex, wantedPlatform := range wantedPlatforms {
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS: d.Platform.OS,
OSVersion: d.Platform.OSVersion,
OSFeatures: slices.Clone(d.Platform.OSFeatures),
Variant: d.Platform.Variant,
}
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
foundPlatform = true
candidate.platformIndex = platformIndex
break
}
}
if !foundPlatform {
continue
}
imagePlatform := imgspecv1.Platform{
Architecture: d.Platform.Architecture,
OS: d.Platform.OS,
OSVersion: d.Platform.OSVersion,
OSFeatures: slices.Clone(d.Platform.OSFeatures),
Variant: d.Platform.Variant,
}
if platform.MatchesPlatform(imagePlatform, wantedPlatform) {
return d.Digest, nil
}
}
if bestMatch == nil || candidate.isPreferredOver(bestMatch, didPreferGzip) {
bestMatch = &candidate
}
}
for _, d := range index.Manifests {
if d.Platform == nil {
return d.Digest, nil
}
if bestMatch != nil {
return bestMatch.digest, nil
}
return "", fmt.Errorf("no image found in image index for architecture %s, variant %q, OS %s", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS)
}
func (index *OCI1Index) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) {
return index.chooseInstance(ctx, preferGzip)
}
// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest
// of the image which is appropriate for the current environment.
func (index *OCI1IndexPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) {
return index.chooseInstance(ctx, types.OptionalBoolFalse)
}
// Serialize returns the index in a blob format.
// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
func (index *OCI1IndexPublic) Serialize() ([]byte, error) {

View File

@ -7,6 +7,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/signature"
compression "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/opencontainers/go-digest"
)
@ -46,24 +47,22 @@ type ImageDestinationInternalOnly interface {
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (types.BlobInfo, error)
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (UploadedBlob, error)
// PutBlobPartial attempts to create a blob using the data that is already present
// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks.
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error)
PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (UploadedBlob, error)
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, types.BlobInfo, error)
TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, ReusedBlob, error)
// PutSignaturesWithFormat writes a set of signatures to the destination.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
@ -79,6 +78,13 @@ type ImageDestination interface {
ImageDestinationInternalOnly
}
// UploadedBlob is information about a blob written to a destination.
// It is the subset of types.BlobInfo fields the transport is responsible for setting; all fields must be provided.
type UploadedBlob struct {
Digest digest.Digest
Size int64
}
// PutBlobOptions are used in PutBlobWithOptions.
type PutBlobOptions struct {
Cache blobinfocache.BlobInfoCache2 // Cache to optionally update with the uploaded blob / look up blob infos.
@ -112,6 +118,17 @@ type TryReusingBlobOptions struct {
SrcRef reference.Named // A reference to the source image that contains the input blob.
}
// ReusedBlob is information about a blob reused in a destination.
// It is the subset of types.BlobInfo fields the transport is responsible for setting.
type ReusedBlob struct {
Digest digest.Digest // Must be provided
Size int64 // Must be provided
// The following compression fields should be set when the reuse substitutes
// a differently-compressed blob.
CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A
CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A
}
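
A sketch of the resulting three-way contract, using a local mirror of the type rather than the internal one (illustrative only):

package main

import "fmt"

// reusedBlob locally mirrors the ReusedBlob shape above, for illustration.
type reusedBlob struct {
    digest string
    size   int64
}

// tryReuse follows the documented contract: (true, info, nil) on reuse,
// (false, zero value, nil) when the blob is simply absent, and a non-nil
// error only on unexpected failures.
func tryReuse(exists bool, size int64, err error) (bool, reusedBlob, error) {
    if err != nil {
        return false, reusedBlob{}, err
    }
    if !exists {
        return false, reusedBlob{}, nil
    }
    return true, reusedBlob{digest: "sha256:…", size: size}, nil // placeholder digest
}

func main() {
    ok, blob, err := tryReuse(true, 4096, nil)
    fmt.Println(ok, blob, err) // true {sha256:… 4096} <nil>
}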
// ImageSourceChunk is a portion of a blob.
// This API is experimental and can be changed without bumping the major version number.
type ImageSourceChunk struct {

View File

@ -24,7 +24,7 @@ type Signature interface {
blobChunk() ([]byte, error)
}
// BlobChunk returns a representation of sig as a []byte, suitable for long-term storage.
// Blob returns a representation of sig as a []byte, suitable for long-term storage.
func Blob(sig Signature) ([]byte, error) {
chunk, err := sig.blobChunk()
if err != nil {
@ -79,7 +79,7 @@ func FromBlob(blob []byte) (Signature, error) {
case bytes.Equal(formatBytes, []byte(SimpleSigningFormat)):
return SimpleSigningFromBlob(blobChunk), nil
case bytes.Equal(formatBytes, []byte(SigstoreFormat)):
return SigstoreFromBlobChunk(blobChunk)
return sigstoreFromBlobChunk(blobChunk)
default:
return nil, fmt.Errorf("unrecognized signature format %q", string(formatBytes))
}

View File

@ -50,8 +50,8 @@ func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, u
}
}
// SigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object.
func SigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) {
// sigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object.
func sigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) {
var v sigstoreJSONRepresentation
if err := json.Unmarshal(blobChunk, &v); err != nil {
return Sigstore{}, err

View File

@ -109,8 +109,8 @@ func (d *ociArchiveImageDestination) SupportsPutBlobPartial() bool {
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
return d.unpackedDest.PutBlobWithOptions(ctx, stream, inputInfo, options)
}
@ -119,18 +119,16 @@ func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, str
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *ociArchiveImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
func (d *ociArchiveImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
return d.unpackedDest.TryReusingBlobWithOptions(ctx, info, options)
}

View File

@ -107,11 +107,11 @@ func (d *ociImageDestination) Close() error {
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
blobFile, err := os.CreateTemp(d.ref.dir, "oci-put-blob")
if err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
succeeded := false
explicitClosed := false
@ -128,14 +128,14 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
size, err := io.Copy(blobFile, stream)
if err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
// On POSIX systems, blobFile was created with mode 0600, so we need to make it readable.
@ -144,52 +144,50 @@ func (d *ociImageDestination) PutBlobWithOptions(ctx context.Context, stream io.
// always fails on Windows.
if runtime.GOOS != "windows" {
if err := blobFile.Chmod(0644); err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
}
blobPath, err := d.ref.blobPath(blobDigest, d.sharedBlobDir)
if err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
if err := ensureParentDirectoryExists(blobPath); err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
// need to explicitly close the file, since a rename won't otherwise work on Windows
blobFile.Close()
explicitClosed = true
if err := os.Rename(blobFile.Name(), blobPath); err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
succeeded = true
return types.BlobInfo{Digest: blobDigest, Size: size}, nil
return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
func (d *ociImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if info.Digest == "" {
return false, types.BlobInfo{}, errors.New("Can not check for a blob with unknown digest")
return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest")
}
blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
if err != nil {
return false, types.BlobInfo{}, err
return false, private.ReusedBlob{}, err
}
finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) {
return false, types.BlobInfo{}, nil
return false, private.ReusedBlob{}, nil
}
if err != nil {
return false, types.BlobInfo{}, err
return false, private.ReusedBlob{}, err
}
return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
return true, private.ReusedBlob{Digest: info.Digest, Size: finfo.Size()}, nil
}
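
The reuse check above amounts to a stat of the blob's path in the OCI layout. A sketch assuming the standard blobs/<algorithm>/<hex> layout (directory and digest are placeholders):

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "strings"
)

// blobPath assumes the standard OCI layout: blobs/<algorithm>/<hex> under dir.
func blobPath(dir, dgst string) (string, error) {
    algo, hex, ok := strings.Cut(dgst, ":")
    if !ok {
        return "", fmt.Errorf("invalid digest %q", dgst)
    }
    return filepath.Join(dir, "blobs", algo, hex), nil
}

func main() {
    path, err := blobPath("/tmp/oci-layout", "sha256:0000…") // placeholder inputs
    if err != nil {
        fmt.Println(err)
        return
    }
    if _, err := os.Stat(path); os.IsNotExist(err) {
        fmt.Println("blob not present; no reuse")
    } else if err != nil {
        fmt.Println("unexpected error:", err)
    } else {
        fmt.Println("blob present; it can be reused")
    }
}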
// PutManifest writes a manifest to the destination. Per our list of supported manifest MIME types,

View File

@ -94,6 +94,7 @@ func (s *ociImageSource) Reference() types.ImageReference {
// Close removes resources associated with an initialized ImageSource, if any.
func (s *ociImageSource) Close() error {
s.client.CloseIdleConnections()
return nil
}

View File

@ -116,8 +116,8 @@ func (d *openshiftImageDestination) SupportsPutBlobPartial() bool {
// inputInfo.MediaType describes the blob format, if known.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far.
func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
return d.docker.PutBlobWithOptions(ctx, stream, inputInfo, options)
}
@ -126,18 +126,16 @@ func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stre
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *openshiftImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
func (d *openshiftImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
return d.docker.TryReusingBlobWithOptions(ctx, info, options)
}

View File

@ -69,7 +69,7 @@ type manifestSchema struct {
}
type ostreeImageDestination struct {
compat impl.Compat
impl.Compat
impl.PropertyMethodsInitialize
stubs.NoPutBlobPartialInitialize
stubs.AlwaysSupportsSignatures
@ -135,16 +135,16 @@ func (d *ostreeImageDestination) Close() error {
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
tmpDir, err := os.MkdirTemp(d.tmpDirPath, "blob")
if err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
blobPath := filepath.Join(tmpDir, "content")
blobFile, err := os.Create(blobPath)
if err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
defer blobFile.Close()
@ -152,19 +152,19 @@ func (d *ostreeImageDestination) PutBlobWithOptions(ctx context.Context, stream
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
size, err := io.Copy(blobFile, stream)
if err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
blobDigest := digester.Digest()
if inputInfo.Size != -1 && size != inputInfo.Size {
return types.BlobInfo{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
return private.UploadedBlob{}, fmt.Errorf("Size mismatch when copying %s, expected %d, got %d", blobDigest, inputInfo.Size, size)
}
if err := blobFile.Sync(); err != nil {
return types.BlobInfo{}, err
return private.UploadedBlob{}, err
}
hash := blobDigest.Hex()
d.blobs[hash] = &blobToImport{Size: size, Digest: blobDigest, BlobPath: blobPath}
return types.BlobInfo{Digest: blobDigest, Size: size}, nil
return private.UploadedBlob{Digest: blobDigest, Size: size}, nil
}
func fixFiles(selinuxHnd *C.struct_selabel_handle, root string, dir string, usermode bool) error {
@ -334,11 +334,11 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
if d.repo == nil {
repo, err := openRepo(d.ref.repo)
if err != nil {
return false, types.BlobInfo{}, err
return false, private.ReusedBlob{}, err
}
d.repo = repo
}
@ -346,25 +346,25 @@ func (d *ostreeImageDestination) TryReusingBlobWithOptions(ctx context.Context,
found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
if err != nil || !found {
return found, types.BlobInfo{}, err
return found, private.ReusedBlob{}, err
}
found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
if err != nil || !found {
return found, types.BlobInfo{}, err
return found, private.ReusedBlob{}, err
}
found, data, err = readMetadata(d.repo, branch, "docker.size")
if err != nil || !found {
return found, types.BlobInfo{}, err
return found, private.ReusedBlob{}, err
}
size, err := strconv.ParseInt(data, 10, 64)
if err != nil {
return false, types.BlobInfo{}, err
return false, private.ReusedBlob{}, err
}
return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil
}
// PutManifest writes manifest to the destination.

View File

@ -91,14 +91,13 @@ func NewTransport() *http.Transport {
direct := &net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: direct.DialContext,
TLSHandshakeTimeout: 10 * time.Second,
// TODO(dmcgowan): Call close idle connections when complete and use keep alive
DisableKeepAlives: true,
IdleConnTimeout: 90 * time.Second,
MaxIdleConns: 100,
}
return tr
}
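
The net effect of this hunk: keep-alives stay enabled and idle connections are pooled with a timeout, which is what gives the new Close()/CloseIdleConnections() calls something to release. A standalone equivalent:

package main

import (
    "net"
    "net/http"
    "time"
)

// newPooledTransport mirrors the change above: keep-alives stay on and idle
// connections are pooled, so CloseIdleConnections() later has something to free.
func newPooledTransport() *http.Transport {
    dialer := &net.Dialer{
        Timeout:   30 * time.Second,
        KeepAlive: 30 * time.Second,
    }
    return &http.Transport{
        Proxy:               http.ProxyFromEnvironment,
        DialContext:         dialer.DialContext,
        TLSHandshakeTimeout: 10 * time.Second,
        IdleConnTimeout:     90 * time.Second,
        MaxIdleConns:        100,
    }
}

func main() {
    client := &http.Client{Transport: newPooledTransport()}
    defer client.CloseIdleConnections()
    // ... issue requests with client ...
}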

View File

@ -11,6 +11,7 @@ import (
"github.com/containers/image/v5/manifest"
"github.com/containers/image/v5/signature/internal"
"github.com/opencontainers/go-digest"
"golang.org/x/exp/slices"
)
// SignOptions includes optional parameters for signing container images.
@ -50,15 +51,26 @@ func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism,
// using mech.
func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte,
expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) {
sig, _, err := VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unverifiedManifest, expectedDockerReference, mech, []string{expectedKeyIdentity})
return sig, err
}
// VerifyImageManifestSignatureUsingKeyIdentityList checks that unverifiedSignature uses one of the expectedKeyIdentities
// to sign unverifiedManifest as expectedDockerReference, using mech. Returns the verified signature and the key identity that
// was used to verify it.
func VerifyImageManifestSignatureUsingKeyIdentityList(unverifiedSignature, unverifiedManifest []byte,
expectedDockerReference string, mech SigningMechanism, expectedKeyIdentities []string) (*Signature, string, error) {
expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference)
if err != nil {
return nil, err
return nil, "", err
}
var matchedKeyIdentity string
sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{
validateKeyIdentity: func(keyIdentity string) error {
if keyIdentity != expectedKeyIdentity {
return internal.NewInvalidSignatureError(fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity))
if !slices.Contains(expectedKeyIdentities, keyIdentity) {
return internal.NewInvalidSignatureError(fmt.Sprintf("Signature by %s does not match expected fingerprints %v", keyIdentity, expectedKeyIdentities))
}
matchedKeyIdentity = keyIdentity
return nil
},
validateSignedDockerReference: func(signedDockerReference string) error {
@ -84,7 +96,7 @@ func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byt
},
})
if err != nil {
return nil, err
return nil, "", err
}
return sig, nil
return sig, matchedKeyIdentity, err
}
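
The verification change is from a single-fingerprint comparison to allow-list membership via slices.Contains. A minimal standalone illustration (the fingerprints are placeholders):

package main

import (
    "fmt"

    "golang.org/x/exp/slices"
)

func main() {
    // Placeholder fingerprints; real callers pass key identities from their policy.
    expected := []string{"0123ABCD", "4567EF89"}
    keyIdentity := "4567EF89"
    if !slices.Contains(expected, keyIdentity) {
        fmt.Println("signature rejected: unexpected key", keyIdentity)
        return
    }
    fmt.Println("matched key identity:", keyIdentity)
}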

View File

@ -21,14 +21,14 @@ const (
// UntrustedSigstorePayload is a parsed content of a sigstore signature payload (not the full signature)
type UntrustedSigstorePayload struct {
UntrustedDockerManifestDigest digest.Digest
UntrustedDockerReference string // FIXME: more precise type?
UntrustedCreatorID *string
untrustedDockerManifestDigest digest.Digest
untrustedDockerReference string // FIXME: more precise type?
untrustedCreatorID *string
// This is intentionally an int64; the native JSON float64 type would allow representing _some_ sub-second precision,
// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
// we would add another field, UntrustedTimestampNS int64.
UntrustedTimestamp *int64
untrustedTimestamp *int64
}
// NewUntrustedSigstorePayload returns an UntrustedSigstorePayload object with
@ -39,10 +39,10 @@ func NewUntrustedSigstorePayload(dockerManifestDigest digest.Digest, dockerRefer
creatorID := "containers/image " + version.Version
timestamp := time.Now().Unix()
return UntrustedSigstorePayload{
UntrustedDockerManifestDigest: dockerManifestDigest,
UntrustedDockerReference: dockerReference,
UntrustedCreatorID: &creatorID,
UntrustedTimestamp: &timestamp,
untrustedDockerManifestDigest: dockerManifestDigest,
untrustedDockerReference: dockerReference,
untrustedCreatorID: &creatorID,
untrustedTimestamp: &timestamp,
}
}
@ -52,20 +52,20 @@ var _ json.Marshaler = (*UntrustedSigstorePayload)(nil)
// MarshalJSON implements the json.Marshaler interface.
func (s UntrustedSigstorePayload) MarshalJSON() ([]byte, error) {
if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
return nil, errors.New("Unexpected empty signature content")
}
critical := map[string]any{
"type": sigstoreSignatureType,
"image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
"identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
"image": map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
"identity": map[string]string{"docker-reference": s.untrustedDockerReference},
}
optional := map[string]any{}
if s.UntrustedCreatorID != nil {
optional["creator"] = *s.UntrustedCreatorID
if s.untrustedCreatorID != nil {
optional["creator"] = *s.untrustedCreatorID
}
if s.UntrustedTimestamp != nil {
optional["timestamp"] = *s.UntrustedTimestamp
if s.untrustedTimestamp != nil {
optional["timestamp"] = *s.untrustedTimestamp
}
signature := map[string]any{
"critical": critical,
@ -121,14 +121,14 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
}
}
if gotCreatorID {
s.UntrustedCreatorID = &creatorID
s.untrustedCreatorID = &creatorID
}
if gotTimestamp {
intTimestamp := int64(timestamp)
if float64(intTimestamp) != timestamp {
return NewInvalidSignatureError("Field optional.timestamp is not an integer")
}
s.UntrustedTimestamp = &intTimestamp
s.untrustedTimestamp = &intTimestamp
}
var t string
@ -150,10 +150,10 @@ func (s *UntrustedSigstorePayload) strictUnmarshalJSON(data []byte) error {
}); err != nil {
return err
}
s.UntrustedDockerManifestDigest = digest.Digest(digestString)
s.untrustedDockerManifestDigest = digest.Digest(digestString)
return ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
"docker-reference": &s.UntrustedDockerReference,
"docker-reference": &s.untrustedDockerReference,
})
}
@ -191,10 +191,10 @@ func VerifySigstorePayload(publicKey crypto.PublicKey, unverifiedPayload []byte,
if err := json.Unmarshal(unverifiedPayload, &unmatchedPayload); err != nil {
return nil, NewInvalidSignatureError(err.Error())
}
if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.UntrustedDockerManifestDigest); err != nil {
if err := rules.ValidateSignedDockerManifestDigest(unmatchedPayload.untrustedDockerManifestDigest); err != nil {
return nil, err
}
if err := rules.ValidateSignedDockerReference(unmatchedPayload.UntrustedDockerReference); err != nil {
if err := rules.ValidateSignedDockerReference(unmatchedPayload.untrustedDockerReference); err != nil {
return nil, err
}
// SigstorePayloadAcceptanceRules have accepted this value.
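
Unexporting these fields makes MarshalJSON and strictUnmarshalJSON the only way in or out of the type. For reference, a sketch of the JSON shape those methods produce and consume (all values, including the type string, are placeholders patterned on the code above):

package main

import (
    "encoding/json"
    "os"
    "time"
)

func main() {
    // The "critical"/"optional" layout mirrors MarshalJSON above; every value
    // here, including the type string, is a placeholder.
    payload := map[string]any{
        "critical": map[string]any{
            "type":     "cosign container image signature", // assumed type constant
            "image":    map[string]string{"docker-manifest-digest": "sha256:…"},
            "identity": map[string]string{"docker-reference": "example.com/repo:tag"},
        },
        "optional": map[string]any{
            "creator":   "containers/image (example)",
            "timestamp": time.Now().Unix(),
        },
    }
    enc := json.NewEncoder(os.Stdout)
    enc.SetIndent("", "  ")
    _ = enc.Encode(payload)
}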

View File

@ -33,8 +33,10 @@ import (
// limitations under the License.
const (
// from sigstore/cosign/pkg/cosign.sigstorePrivateKeyPemType.
sigstorePrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY"
// from sigstore/cosign/pkg/cosign.CosignPrivateKeyPemType.
cosignPrivateKeyPemType = "ENCRYPTED COSIGN PRIVATE KEY"
// from sigstore/cosign/pkg/cosign.SigstorePrivateKeyPemType.
sigstorePrivateKeyPemType = "ENCRYPTED SIGSTORE PRIVATE KEY"
)
// from sigstore/cosign/pkg/cosign.loadPrivateKey
@ -45,7 +47,7 @@ func loadPrivateKey(key []byte, pass []byte) (signature.SignerVerifier, error) {
if p == nil {
return nil, errors.New("invalid pem block")
}
if p.Type != sigstorePrivateKeyPemType {
if p.Type != sigstorePrivateKeyPemType && p.Type != cosignPrivateKeyPemType {
return nil, fmt.Errorf("unsupported pem type: %s", p.Type)
}
@ -86,7 +88,9 @@ func marshalKeyPair(privateKey crypto.PrivateKey, publicKey crypto.PublicKey, pa
// store in PEM format
privBytes := pem.EncodeToMemory(&pem.Block{
Bytes: encBytes,
Type: sigstorePrivateKeyPemType,
// Use the older “COSIGN” type name; as of 2023-03-30 cosign's main branch generates “SIGSTORE” types,
// but a version of cosign that can accept them has not yet been released.
Type: cosignPrivateKeyPemType,
})
// Now do the public key
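
A compact sketch of the accept-both, emit-old behavior described here, using only encoding/pem (the key bytes are placeholders):

package main

import (
    "encoding/pem"
    "errors"
    "fmt"
)

const (
    cosignPrivateKeyPemType   = "ENCRYPTED COSIGN PRIVATE KEY"
    sigstorePrivateKeyPemType = "ENCRYPTED SIGSTORE PRIVATE KEY"
)

// checkPEMType sketches the dual-type acceptance above: either header is valid
// on load, while marshalKeyPair keeps emitting the older COSIGN type.
func checkPEMType(keyPEM []byte) error {
    p, _ := pem.Decode(keyPEM)
    if p == nil {
        return errors.New("invalid pem block")
    }
    if p.Type != sigstorePrivateKeyPemType && p.Type != cosignPrivateKeyPemType {
        return fmt.Errorf("unsupported pem type: %s", p.Type)
    }
    return nil
}

func main() {
    key := pem.EncodeToMemory(&pem.Block{Type: cosignPrivateKeyPemType, Bytes: []byte("placeholder")})
    fmt.Println(checkPEMType(key)) // <nil>: the older COSIGN type is still accepted
}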

View File

@ -31,14 +31,14 @@ type Signature struct {
// untrustedSignature is a parsed content of a signature.
type untrustedSignature struct {
UntrustedDockerManifestDigest digest.Digest
UntrustedDockerReference string // FIXME: more precise type?
UntrustedCreatorID *string
untrustedDockerManifestDigest digest.Digest
untrustedDockerReference string // FIXME: more precise type?
untrustedCreatorID *string
// This is intentionally an int64; the native JSON float64 type would allow representing _some_ sub-second precision,
// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
// we would add another field, UntrustedTimestampNS int64.
UntrustedTimestamp *int64
untrustedTimestamp *int64
}
// UntrustedSignatureInformation is information available in an untrusted signature.
@ -65,10 +65,10 @@ func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference s
creatorID := "atomic " + version.Version
timestamp := time.Now().Unix()
return untrustedSignature{
UntrustedDockerManifestDigest: dockerManifestDigest,
UntrustedDockerReference: dockerReference,
UntrustedCreatorID: &creatorID,
UntrustedTimestamp: &timestamp,
untrustedDockerManifestDigest: dockerManifestDigest,
untrustedDockerReference: dockerReference,
untrustedCreatorID: &creatorID,
untrustedTimestamp: &timestamp,
}
}
@ -78,20 +78,20 @@ var _ json.Marshaler = (*untrustedSignature)(nil)
// MarshalJSON implements the json.Marshaler interface.
func (s untrustedSignature) MarshalJSON() ([]byte, error) {
if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
if s.untrustedDockerManifestDigest == "" || s.untrustedDockerReference == "" {
return nil, errors.New("Unexpected empty signature content")
}
critical := map[string]any{
"type": signatureType,
"image": map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
"identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
"image": map[string]string{"docker-manifest-digest": s.untrustedDockerManifestDigest.String()},
"identity": map[string]string{"docker-reference": s.untrustedDockerReference},
}
optional := map[string]any{}
if s.UntrustedCreatorID != nil {
optional["creator"] = *s.UntrustedCreatorID
if s.untrustedCreatorID != nil {
optional["creator"] = *s.untrustedCreatorID
}
if s.UntrustedTimestamp != nil {
optional["timestamp"] = *s.UntrustedTimestamp
if s.untrustedTimestamp != nil {
optional["timestamp"] = *s.untrustedTimestamp
}
signature := map[string]any{
"critical": critical,
@ -144,14 +144,14 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
return err
}
if gotCreatorID {
s.UntrustedCreatorID = &creatorID
s.untrustedCreatorID = &creatorID
}
if gotTimestamp {
intTimestamp := int64(timestamp)
if float64(intTimestamp) != timestamp {
return internal.NewInvalidSignatureError("Field optional.timestamp is not an integer")
}
s.UntrustedTimestamp = &intTimestamp
s.untrustedTimestamp = &intTimestamp
}
var t string
@ -173,10 +173,10 @@ func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
}); err != nil {
return err
}
s.UntrustedDockerManifestDigest = digest.Digest(digestString)
s.untrustedDockerManifestDigest = digest.Digest(digestString)
return internal.ParanoidUnmarshalJSONObjectExactFields(identity, map[string]any{
"docker-reference": &s.UntrustedDockerReference,
"docker-reference": &s.untrustedDockerReference,
})
}
@ -229,16 +229,16 @@ func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte
if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
return nil, internal.NewInvalidSignatureError(err.Error())
}
if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.untrustedDockerManifestDigest); err != nil {
return nil, err
}
if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil {
if err := rules.validateSignedDockerReference(unmatchedSignature.untrustedDockerReference); err != nil {
return nil, err
}
// signatureAcceptanceRules have accepted this value.
return &Signature{
DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest,
DockerReference: unmatchedSignature.UntrustedDockerReference,
DockerManifestDigest: unmatchedSignature.untrustedDockerManifestDigest,
DockerReference: unmatchedSignature.untrustedDockerReference,
}, nil
}
@ -269,14 +269,14 @@ func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []
}
var timestamp *time.Time // = nil
if untrustedDecodedContents.UntrustedTimestamp != nil {
ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0)
if untrustedDecodedContents.untrustedTimestamp != nil {
ts := time.Unix(*untrustedDecodedContents.untrustedTimestamp, 0)
timestamp = &ts
}
return &UntrustedSignatureInformation{
UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest,
UntrustedDockerReference: untrustedDecodedContents.UntrustedDockerReference,
UntrustedCreatorID: untrustedDecodedContents.UntrustedCreatorID,
UntrustedDockerManifestDigest: untrustedDecodedContents.untrustedDockerManifestDigest,
UntrustedDockerReference: untrustedDecodedContents.untrustedDockerReference,
UntrustedCreatorID: untrustedDecodedContents.untrustedCreatorID,
UntrustedTimestamp: timestamp,
UntrustedShortKeyIdentifier: shortKeyIdentifier,
}, nil

View File

@ -77,13 +77,19 @@ type storageImageDestination struct {
indexToStorageID map[int]*string
// All accesses to below data are protected by `lock` which is made
// *explicit* in the code.
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
indexToPulledLayerInfo map[int]*manifest.LayerInfo // Mapping from layer (by index) to pulled down blob
blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image
blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
}
// addedLayerInfo records data about a layer to use in this image.
type addedLayerInfo struct {
digest digest.Digest
emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
}
// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
@ -111,18 +117,18 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
HasThreadSafePutBlob: true,
}),
imageRef: imageRef,
directory: directory,
signatureses: make(map[digest.Digest][]byte),
blobDiffIDs: make(map[digest.Digest]digest.Digest),
blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
fileSizes: make(map[digest.Digest]int64),
filenames: make(map[digest.Digest]string),
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
indexToStorageID: make(map[int]*string),
indexToPulledLayerInfo: make(map[int]*manifest.LayerInfo),
diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
imageRef: imageRef,
directory: directory,
signatureses: make(map[digest.Digest][]byte),
blobDiffIDs: make(map[digest.Digest]digest.Digest),
blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
fileSizes: make(map[digest.Digest]int64),
filenames: make(map[digest.Digest]string),
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
indexToStorageID: make(map[int]*string),
indexToAddedLayerInfo: make(map[int]addedLayerInfo),
diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
}
dest.Compat = impl.AddCompat(dest)
return dest, nil
@ -158,7 +164,7 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (types.BlobInfo, error) {
func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) {
info, err := s.putBlobToPendingFile(stream, blobinfo, &options)
if err != nil {
return info, err
@ -168,21 +174,20 @@ func (s *storageImageDestination) PutBlobWithOptions(ctx context.Context, stream
return info, nil
}
return info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
return info, s.queueOrCommit(*options.LayerIndex, addedLayerInfo{
digest: info.Digest,
emptyLayer: options.EmptyLayer,
})
}
// putBlobToPendingFile implements ImageDestination.PutBlobWithOptions, storing stream into an on-disk file.
// The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (types.BlobInfo, error) {
func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinfo types.BlobInfo, options *private.PutBlobOptions) (private.UploadedBlob, error) {
// Stores a layer or data blob in our temporary directory, checking that any information
// in the blobinfo matches the incoming data.
errorBlobInfo := types.BlobInfo{
Digest: "",
Size: -1,
}
if blobinfo.Digest != "" {
if err := blobinfo.Digest.Validate(); err != nil {
return errorBlobInfo, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err)
return private.UploadedBlob{}, fmt.Errorf("invalid digest %#v: %w", blobinfo.Digest.String(), err)
}
}
@ -190,7 +195,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
filename := s.computeNextBlobCacheFile()
file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
if err != nil {
return errorBlobInfo, fmt.Errorf("creating temporary file %q: %w", filename, err)
return private.UploadedBlob{}, fmt.Errorf("creating temporary file %q: %w", filename, err)
}
defer file.Close()
counter := ioutils.NewWriteCounter(file)
@ -198,7 +203,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
digester, stream := putblobdigest.DigestIfUnknown(stream, blobinfo)
decompressed, err := archive.DecompressStream(stream)
if err != nil {
return errorBlobInfo, fmt.Errorf("setting up to decompress blob: %w", err)
return private.UploadedBlob{}, fmt.Errorf("setting up to decompress blob: %w", err)
}
diffID := digest.Canonical.Digester()
@ -207,7 +212,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
_, err = io.Copy(diffID.Hash(), decompressed)
decompressed.Close()
if err != nil {
return errorBlobInfo, fmt.Errorf("storing blob to file %q: %w", filename, err)
return private.UploadedBlob{}, fmt.Errorf("storing blob to file %q: %w", filename, err)
}
// Determine blob properties, and fail if information that we were given about the blob
@ -217,7 +222,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
if blobSize < 0 {
blobSize = counter.Count
} else if blobinfo.Size != counter.Count {
return errorBlobInfo, ErrBlobSizeMismatch
return private.UploadedBlob{}, ErrBlobSizeMismatch
}
// Record information about the blob.
@ -229,10 +234,9 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf
// This is safe because we have just computed diffID, and blobDigest was either computed
// by us, or validated by the caller (usually copy.digestingReader).
options.Cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
return types.BlobInfo{
Digest: blobDigest,
Size: blobSize,
MediaType: blobinfo.MediaType,
return private.UploadedBlob{
Digest: blobDigest,
Size: blobSize,
}, nil
}
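putBlobToPendingFile computes the compressed blob digest and the uncompressed DiffID in a single pass over the stream. A rough sketch of that tee-and-decompress pattern under simplified assumptions (gzip stands in for the generic `archive.DecompressStream`, and sha256 is used directly instead of the `putblobdigest` helpers):

```go
// All bytes pulled by the decompressor also flow through the
// compressed-blob digester via io.TeeReader.
package main

import (
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
)

func digests(compressed io.Reader) (blobDigest, diffID string, err error) {
	blobHash := sha256.New()
	tee := io.TeeReader(compressed, blobHash) // every byte read is also hashed
	zr, err := gzip.NewReader(tee)
	if err != nil {
		return "", "", err
	}
	diffHash := sha256.New()
	if _, err := io.Copy(diffHash, zr); err != nil { // hashes the uncompressed diff
		return "", "", err
	}
	if err := zr.Close(); err != nil {
		return "", "", err
	}
	return fmt.Sprintf("sha256:%x", blobHash.Sum(nil)),
		fmt.Sprintf("sha256:%x", diffHash.Sum(nil)), nil
}

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("layer tar bytes"))
	zw.Close()
	fmt.Println(digests(&buf))
}
```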
@ -265,7 +269,7 @@ func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.Read
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (types.BlobInfo, error) {
func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
fetcher := zstdFetcher{
chunkAccessor: chunkAccessor,
ctx: ctx,
@ -274,12 +278,12 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher)
if err != nil {
return srcInfo, err
return private.UploadedBlob{}, err
}
out, err := s.imageRef.transport.store.ApplyDiffWithDiffer("", nil, differ)
if err != nil {
return srcInfo, err
return private.UploadedBlob{}, err
}
blobDigest := srcInfo.Digest
@ -291,124 +295,126 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
s.diffOutputs[blobDigest] = out
s.lock.Unlock()
return srcInfo, nil
return private.UploadedBlob{
Digest: blobDigest,
Size: srcInfo.Size,
}, nil
}
// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
// reflected in the manifest that will be written.
// If the blob has been successfully reused, returns (true, info, nil).
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
reused, info, err := s.tryReusingBlobAsPending(blobinfo, &options)
func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context, blobinfo types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
reused, info, err := s.tryReusingBlobAsPending(blobinfo.Digest, blobinfo.Size, &options)
if err != nil || !reused || options.LayerIndex == nil {
return reused, info, err
}
return reused, info, s.queueOrCommit(ctx, info, *options.LayerIndex, options.EmptyLayer)
return reused, info, s.queueOrCommit(*options.LayerIndex, addedLayerInfo{
digest: info.Digest,
emptyLayer: options.EmptyLayer,
})
}
// tryReusingBlobAsPending implements TryReusingBlobWithOptions, filling s.blobDiffIDs and other metadata.
// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.blobDiffIDs and other metadata.
// The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) tryReusingBlobAsPending(blobinfo types.BlobInfo, options *private.TryReusingBlobOptions) (bool, types.BlobInfo, error) {
func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
// lock the entire method as it executes fairly quickly
s.lock.Lock()
defer s.lock.Unlock()
if options.SrcRef != nil {
// Check if we have the layer in the underlying additional layer store.
aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobinfo.Digest, options.SrcRef.String())
aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(digest, options.SrcRef.String())
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, types.BlobInfo{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobinfo.Digest, err)
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, digest, err)
} else if err == nil {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.blobDiffIDs[blobinfo.Digest] = aLayer.UncompressedDigest()
s.blobAdditionalLayer[blobinfo.Digest] = aLayer
return true, types.BlobInfo{
Digest: blobinfo.Digest,
Size: aLayer.CompressedSize(),
MediaType: blobinfo.MediaType,
s.blobDiffIDs[digest] = aLayer.UncompressedDigest()
s.blobAdditionalLayer[digest] = aLayer
return true, private.ReusedBlob{
Digest: digest,
Size: aLayer.CompressedSize(),
}, nil
}
}
if blobinfo.Digest == "" {
return false, types.BlobInfo{}, errors.New(`Can not check for a blob with unknown digest`)
if digest == "" {
return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`)
}
if err := blobinfo.Digest.Validate(); err != nil {
return false, types.BlobInfo{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
if err := digest.Validate(); err != nil {
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
}
// Check if we've already cached it in a file.
if size, ok := s.fileSizes[blobinfo.Digest]; ok {
return true, types.BlobInfo{
Digest: blobinfo.Digest,
Size: size,
MediaType: blobinfo.MediaType,
if size, ok := s.fileSizes[digest]; ok {
return true, private.ReusedBlob{
Digest: digest,
Size: size,
}, nil
}
// Check if we have a wasn't-compressed layer in storage that's based on that blob.
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(digest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, types.BlobInfo{}, fmt.Errorf(`looking for layers with digest %q: %w`, blobinfo.Digest, err)
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, digest, err)
}
if len(layers) > 0 {
// Save this for completeness.
s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
return true, types.BlobInfo{
Digest: blobinfo.Digest,
Size: layers[0].UncompressedSize,
MediaType: blobinfo.MediaType,
s.blobDiffIDs[digest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: digest,
Size: layers[0].UncompressedSize,
}, nil
}
// Check if we have a was-compressed layer in storage that's based on that blob.
layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
layers, err = s.imageRef.transport.store.LayersByCompressedDigest(digest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, types.BlobInfo{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, blobinfo.Digest, err)
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, digest, err)
}
if len(layers) > 0 {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
return true, types.BlobInfo{
Digest: blobinfo.Digest,
Size: layers[0].CompressedSize,
MediaType: blobinfo.MediaType,
s.blobDiffIDs[digest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: digest,
Size: layers[0].CompressedSize,
}, nil
}
// Does the blob correspond to a known DiffID which we already have available?
// Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
// uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size.
if options.CanSubstitute || blobinfo.Size != -1 {
if uncompressedDigest := options.Cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
if options.CanSubstitute || size != -1 {
if uncompressedDigest := options.Cache.UncompressedDigest(digest); uncompressedDigest != "" && uncompressedDigest != digest {
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, types.BlobInfo{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
}
if len(layers) > 0 {
if blobinfo.Size != -1 {
s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
return true, blobinfo, nil
if size != -1 {
s.blobDiffIDs[digest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: digest,
Size: size,
}, nil
}
if !options.CanSubstitute {
return false, types.BlobInfo{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blobInfo %v", blobinfo)
return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", digest)
}
s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
return true, types.BlobInfo{
Digest: uncompressedDigest,
Size: layers[0].UncompressedSize,
MediaType: blobinfo.MediaType,
return true, private.ReusedBlob{
Digest: uncompressedDigest,
Size: layers[0].UncompressedSize,
}, nil
}
}
}
// Nope, we don't have it.
return false, types.BlobInfo{}, nil
return false, private.ReusedBlob{}, nil
}
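The method above is essentially an ordered fallback chain over several blob sources. A stripped-down sketch of that shape, with hypothetical stand-ins for the storage queries (additional-layer store, local file cache, uncompressed/compressed layer indexes, cache substitution):

```go
// Each lookup is a hypothetical stand-in for one storage query in the chain.
package main

import "fmt"

type reusedBlob struct {
	digest string
	size   int64
}

func tryReuse(digest string, lookups []func(string) (reusedBlob, bool)) (reusedBlob, bool) {
	for _, lookup := range lookups {
		if blob, ok := lookup(digest); ok {
			return blob, true // first source that knows the blob wins
		}
	}
	return reusedBlob{}, false // nope, we don't have it
}

func main() {
	fileSizes := map[string]int64{"sha256:abc": 123} // stand-in for s.fileSizes
	lookups := []func(string) (reusedBlob, bool){
		func(d string) (reusedBlob, bool) { // stand-in: already cached in a file?
			size, ok := fileSizes[d]
			return reusedBlob{digest: d, size: size}, ok
		},
		// ... further stand-ins would query the layer stores ...
	}
	fmt.Println(tryReuse("sha256:abc", lookups))
}
```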
// computeID computes a recommended image ID based on information we have so far. If
@ -470,10 +476,10 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er
return nil, errors.New("blob not found")
}
// queueOrCommit queues in the specified blob to be committed to the storage.
// queueOrCommit queues the specified layer to be committed to the storage.
// If no other goroutine is already committing layers, the layer and all
// subsequent layers (if already queued) will be committed to the storage.
func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.BlobInfo, index int, emptyLayer bool) error {
func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo) error {
// NOTE: whenever the code below is touched, make sure that all code
// paths unlock the lock and to unlock it exactly once.
//
@ -493,10 +499,7 @@ func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.
// caller is the "worker" routine committing layers. All other routines
// can continue pulling and queuing in layers.
s.lock.Lock()
s.indexToPulledLayerInfo[index] = &manifest.LayerInfo{
BlobInfo: blob,
EmptyLayer: emptyLayer,
}
s.indexToAddedLayerInfo[index] = info
// We're still waiting for at least one previous/parent layer to be
// committed, so there's nothing to do.
@ -505,10 +508,14 @@ func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.
return nil
}
for info := s.indexToPulledLayerInfo[index]; info != nil; info = s.indexToPulledLayerInfo[index] {
for {
info, ok := s.indexToAddedLayerInfo[index]
if !ok {
break
}
s.lock.Unlock()
// Note: commitLayer locks on-demand.
if err := s.commitLayer(ctx, *info, index); err != nil {
if err := s.commitLayer(index, info, -1); err != nil {
return err
}
s.lock.Lock()
@ -522,13 +529,15 @@ func (s *storageImageDestination) queueOrCommit(ctx context.Context, blob types.
return nil
}
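The queue-or-commit pattern above lets layers arrive in any order while committing them strictly in order, with exactly one goroutine draining at a time. A self-contained sketch of the same idea, with simplified types (a string stands in for addedLayerInfo; commitLayer is simulated with a print):

```go
package main

import (
	"fmt"
	"sync"
)

type committer struct {
	lock         sync.Mutex
	currentIndex int            // the next index to commit
	queued       map[int]string // index -> queued layer
}

func (c *committer) queueOrCommit(index int, layer string) {
	c.lock.Lock()
	c.queued[index] = layer
	if index != c.currentIndex {
		// An earlier layer is still missing; whichever goroutine supplies it
		// will commit this one for us.
		c.lock.Unlock()
		return
	}
	for {
		layer, ok := c.queued[c.currentIndex]
		if !ok {
			break
		}
		i := c.currentIndex
		c.lock.Unlock()
		fmt.Println("committing layer", i, layer) // commitLayer stand-in
		c.lock.Lock()
		delete(c.queued, i)
		c.currentIndex++
	}
	c.lock.Unlock()
}

func main() {
	c := &committer{queued: map[int]string{}}
	var wg sync.WaitGroup
	for i, l := range []string{"base", "middle", "top"} {
		wg.Add(1)
		go func(i int, l string) { defer wg.Done(); c.queueOrCommit(i, l) }(i, l)
	}
	wg.Wait() // layers are always committed as 0, 1, 2
}
```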
// commitLayer commits the specified blob with the given index to the storage.
// commitLayer commits the specified layer with the given index to the storage.
// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
//
// Note that the previous layer is expected to already be committed.
//
// Caution: this function must be called without holding `s.lock`. Callers
// must guarantee that, at any given time, at most one goroutine may execute
// `commitLayer()`.
func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest.LayerInfo, index int) error {
func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) error {
// Already committed? Return early.
if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
return nil
@ -543,7 +552,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
}
// Carry over the previous ID for empty non-base layers.
if blob.EmptyLayer {
if info.emptyLayer {
s.indexToStorageID[index] = &lastLayer
return nil
}
@ -551,7 +560,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
// Check if there's already a layer with the ID that we'd give to the result of applying
// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
s.lock.Lock()
diffID, haveDiffID := s.blobDiffIDs[blob.Digest]
diffID, haveDiffID := s.blobDiffIDs[info.digest]
s.lock.Unlock()
if !haveDiffID {
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
@ -560,18 +569,21 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
// that relies on using a blob digest that has never been seen by the store had better call
// TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
// so far we are going to accommodate that (if we should be doing that at all).
logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
// NOTE: use `TryReusingBlob` to prevent recursion.
has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, none.NoCache, false)
logrus.Debugf("looking for diffID for blob %+v", info.digest)
// Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit.
has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{
Cache: none.NoCache,
CanSubstitute: false,
})
if err != nil {
return fmt.Errorf("checking for a layer based on blob %q: %w", blob.Digest.String(), err)
return fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err)
}
if !has {
return fmt.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String())
return fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String())
}
diffID, haveDiffID = s.blobDiffIDs[blob.Digest]
diffID, haveDiffID = s.blobDiffIDs[info.digest]
if !haveDiffID {
return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String())
return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", info.digest.String())
}
}
id := diffID.Hex()
@ -586,7 +598,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
}
s.lock.Lock()
diffOutput, ok := s.diffOutputs[blob.Digest]
diffOutput, ok := s.diffOutputs[info.digest]
s.lock.Unlock()
if ok {
layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
@ -595,7 +607,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
}
// FIXME: what to do with the uncompressed digest?
diffOutput.UncompressedDigest = blob.Digest
diffOutput.UncompressedDigest = info.digest
if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil {
_ = s.imageRef.transport.store.Delete(layer.ID)
@ -607,7 +619,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
}
s.lock.Lock()
al, ok := s.blobAdditionalLayer[blob.Digest]
al, ok := s.blobAdditionalLayer[info.digest]
s.lock.Unlock()
if ok {
layer, err := al.PutAs(id, lastLayer, nil)
@ -622,7 +634,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
// Check if we previously cached a file with that blob's contents. If we didn't,
// then we need to read the desired contents from a layer.
s.lock.Lock()
filename, ok := s.filenames[blob.Digest]
filename, ok := s.filenames[info.digest]
s.lock.Unlock()
if !ok {
// Try to find the layer with contents matching that blobsum.
@ -631,13 +643,13 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
if err2 == nil && len(layers) > 0 {
layer = layers[0].ID
} else {
layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest)
layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(info.digest)
if err2 == nil && len(layers) > 0 {
layer = layers[0].ID
}
}
if layer == "" {
return fmt.Errorf("locating layer for blob %q: %w", blob.Digest, err2)
return fmt.Errorf("locating layer for blob %q: %w", info.digest, err2)
}
// Read the layer's contents.
noCompression := archive.Uncompressed
@ -646,7 +658,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
}
diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
if err2 != nil {
return fmt.Errorf("reading layer %q for blob %q: %w", layer, blob.Digest, err2)
return fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2)
}
// Copy the layer diff to a file. Diff() takes a lock that it holds
// until the ReadCloser that it returns is closed, and PutLayer() wants
@ -670,7 +682,7 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
// Make sure that we can find this file later, should we need the layer's
// contents again.
s.lock.Lock()
s.filenames[blob.Digest] = filename
s.filenames[info.digest] = filename
s.lock.Unlock()
}
// Read the cached blob and use it as a diff.
@ -682,11 +694,11 @@ func (s *storageImageDestination) commitLayer(ctx context.Context, blob manifest
// Build the new layer using the diff, regardless of where it came from.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{
OriginalDigest: blob.Digest,
OriginalDigest: info.digest,
UncompressedDigest: diffID,
}, file)
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
return fmt.Errorf("adding layer with blob %q: %w", blob.Digest, err)
return fmt.Errorf("adding layer with blob %q: %w", info.digest, err)
}
s.indexToStorageID[index] = &layer.ID
@ -737,7 +749,10 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t
// Extract, commit, or find the layers.
for i, blob := range layerBlobs {
if err := s.commitLayer(ctx, blob, i); err != nil {
if err := s.commitLayer(i, addedLayerInfo{
digest: blob.Digest,
emptyLayer: blob.EmptyLayer,
}, blob.Size); err != nil {
return err
}
}

View File

@ -8,7 +8,7 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 24
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 2
VersionPatch = 3
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = "-dev"

29
vendor/github.com/go-logr/logr/.golangci.yaml generated vendored Normal file
View File

@ -0,0 +1,29 @@
run:
timeout: 1m
tests: true
linters:
disable-all: true
enable:
- asciicheck
- deadcode
- errcheck
- forcetypeassert
- gocritic
- gofmt
- goimports
- gosimple
- govet
- ineffassign
- misspell
- revive
- staticcheck
- structcheck
- typecheck
- unused
- varcheck
issues:
exclude-use-default: false
max-issues-per-linter: 0
max-same-issues: 10

6
vendor/github.com/go-logr/logr/CHANGELOG.md generated vendored Normal file
View File

@ -0,0 +1,6 @@
# CHANGELOG
## v1.0.0-rc1
This is the first logged release. Major changes (including breaking changes)
have occurred since earlier tags.

17
vendor/github.com/go-logr/logr/CONTRIBUTING.md generated vendored Normal file
View File

@ -0,0 +1,17 @@
# Contributing
Logr is open to pull-requests, provided they fit within the intended scope of
the project. Specifically, this library aims to be VERY small and minimalist,
with no external dependencies.
## Compatibility
This project intends to follow [semantic versioning](http://semver.org) and
is very strict about compatibility. Any proposed changes MUST follow those
rules.
## Performance
As a logging library, logr must be as light-weight as possible. Any proposed
code change must include results of running the [benchmark](./benchmark)
before and after the change.

201
vendor/github.com/go-logr/logr/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

282
vendor/github.com/go-logr/logr/README.md generated vendored Normal file
View File

@ -0,0 +1,282 @@
# A minimal logging API for Go
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)
logr offers an(other) opinion on how Go programs and libraries can do logging
without becoming coupled to a particular logging implementation. This is not
an implementation of logging - it is an API. In fact it is two APIs with two
different sets of users.
The `Logger` type is intended for application and library authors. It provides
a relatively small API which can be used everywhere you want to emit logs. It
defers the actual act of writing logs (to files, to stdout, or whatever) to the
`LogSink` interface.
The `LogSink` interface is intended for logging library implementers. It is a
pure interface which can be implemented by logging frameworks to provide the actual logging
functionality.
This decoupling allows application and library developers to write code in
terms of `logr.Logger` (which has very low dependency fan-out) while the
implementation of logging is managed "up stack" (e.g. in or near `main()`.)
Application developers can then switch out implementations as necessary.
Many people assert that libraries should not be logging, and as such efforts
like this are pointless. Those people are welcome to convince the authors of
the tens-of-thousands of libraries that *DO* write logs that they are all
wrong. In the meantime, logr takes a more practical approach.
## Typical usage
Somewhere, early in an application's life, it will make a decision about which
logging library (implementation) it actually wants to use. Something like:
```
func main() {
// ... other setup code ...
// Create the "root" logger. We have chosen the "logimpl" implementation,
// which takes some initial parameters and returns a logr.Logger.
logger := logimpl.New(param1, param2)
// ... other setup code ...
```
Most apps will call into other libraries, create structures to govern the flow,
etc. The `logr.Logger` object can be passed to these other libraries, stored
in structs, or even used as a package-global variable, if needed. For example:
```
app := createTheAppObject(logger)
app.Run()
```
Outside of this early setup, no other packages need to know about the choice of
implementation. They write logs in terms of the `logr.Logger` that they
received:
```
type appObject struct {
// ... other fields ...
logger logr.Logger
// ... other fields ...
}
func (app *appObject) Run() {
app.logger.Info("starting up", "timestamp", time.Now())
// ... app code ...
```
## Background
If the Go standard library had defined an interface for logging, this project
probably would not be needed. Alas, here we are.
### Inspiration
Before you consider this package, please read [this blog post by the
inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what
he has to say, and it largely aligns with our own experiences.
### Differences from Dave's ideas
The main differences are:
1. Dave basically proposes doing away with the notion of a logging API in favor
of `fmt.Printf()`. We disagree, especially when you consider things like output
locations, timestamps, file and line decorations, and structured logging. This
package restricts the logging API to just 2 types of logs: info and error.
Info logs are things you want to tell the user which are not errors. Error
logs are, well, errors. If your code receives an `error` from a subordinate
function call and is logging that `error` *and not returning it*, use error
logs.
2. Verbosity-levels on info logs. This gives developers a chance to indicate
arbitrary grades of importance for info logs, without assigning names with
semantic meaning such as "warning", "trace", and "debug." Superficially this
may feel very similar, but the primary difference is the lack of semantics.
Because verbosity is a numerical value, it's safe to assume that an app running
with higher verbosity means more (and less important) logs will be generated.
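For a concrete feel of the two log classes and V-levels described above, here is a small sketch using the `stdr` implementation as an assumed backend; any implementation from the list below would work the same way:

```
package main

import (
	"errors"
	"log"
	"os"

	"github.com/go-logr/logr"
	"github.com/go-logr/stdr"
)

func doWork(logger logr.Logger) {
	logger.Info("starting work", "items", 3)      // info log
	logger.V(4).Info("debug detail", "step", "a") // emitted only at verbosity >= 4
	err := errors.New("boom")
	logger.Error(err, "work failed", "items", 3) // error received and not returned
}

func main() {
	stdr.SetVerbosity(4) // show V(0..4)
	doWork(stdr.New(log.New(os.Stderr, "", log.LstdFlags)))
}
```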
## Implementations (non-exhaustive)
There are implementations for the following logging libraries:
- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
- **bytes.Buffer** (writing to a buffer): [buflogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
## FAQ
### Conceptual
#### Why structured logging?
- **Structured logs are more easily queryable**: Since you've got
key-value pairs, it's much easier to query your structured logs for
particular values by filtering on the contents of a particular key --
think searching request logs for error codes, Kubernetes reconcilers for
the name and namespace of the reconciled object, etc.
- **Structured logging makes it easier to have cross-referenceable logs**:
Similarly to searchability, if you maintain conventions around your
keys, it becomes easy to gather all log lines related to a particular
concept.
- **Structured logs allow better dimensions of filtering**: if you have
structure to your logs, you've got more precise control over how much
information is logged -- you might choose in a particular configuration
to log certain keys but not others, only log lines where a certain key
matches a certain value, etc., instead of just having v-levels and names
to key off of.
- **Structured logs better represent structured data**: sometimes, the
data that you want to log is inherently structured (think tuple-like
objects.) Structured logs allow you to preserve that structure when
outputting.
#### Why V-levels?
**V-levels give operators an easy way to control the chattiness of log
operations**. V-levels provide a way for a given package to distinguish
the relative importance or verbosity of a given log message. Then, if
a particular logger or package is logging too many messages, the user
of the package can simply change the v-levels for that library.
#### Why not named levels, like Info/Warning/Error?
Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
from Dave's ideas](#differences-from-daves-ideas).
#### Why not allow format strings, too?
**Format strings negate many of the benefits of structured logs**:
- They're not easily searchable without resorting to fuzzy searching,
regular expressions, etc.
- They don't store structured data well, since contents are flattened into
a string.
- They're not cross-referenceable.
- They don't compress easily, since the message is not constant.
(Unless you turn positional parameters into key-value pairs with numerical
keys, at which point you've gotten key-value logging with meaningless
keys.)
### Practical
#### Why key-value pairs, and not a map?
Key-value pairs are *much* easier to optimize, especially around
allocations. Zap (a structured logger that inspired logr's interface) has
[performance measurements](https://github.com/uber-go/zap#performance)
that show this quite nicely.
While the interface ends up being a little less obvious, you get
potentially better performance, plus avoid making users type
`map[string]string{}` every time they want to log.
#### What if my V-levels differ between libraries?
That's fine. Control your V-levels on a per-logger basis, and use the
`WithName` method to pass different loggers to different libraries.
Generally, you should take care to ensure that you have relatively
consistent V-levels within a given logger, however, as this makes deciding
on what verbosity of logs to request easier.
#### But I really want to use a format string!
That's not actually a question. Assuming your question is "how do
I convert my mental model of logging with format strings to logging with
constant messages":
1. Figure out what the error actually is, as you'd write in a TL;DR style,
and use that as a message.
2. For every place you'd write a format specifier, look to the word before
it, and add that as a key value pair.
For instance, consider the following examples (all taken from spots in the
Kubernetes codebase):
- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
responseCode, err)` becomes `logger.Error(err, "client returned an
error", "code", responseCode)`
- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
response when requesting url", "attempt", retries, "after
seconds", seconds, "url", url)`
If you *really* must use a format string, use it in a key's value, and
call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to
reflect over type %T", obj)` becomes `logger.Info("unable to reflect over
type", "type", fmt.Sprintf("%T", obj))`. In general though, the cases where
this is necessary should be few and far between.
#### How do I choose my V-levels?
This is basically the only hard constraint: increase V-levels to denote
more verbose or more debug-y logs.
Otherwise, you can start out with `0` as "you always want to see this",
`1` as "common logging that you might *possibly* want to turn off", and
`10` as "I would like to performance-test your log collection stack."
Then gradually choose levels in between as you need them, working your way
down from 10 (for debug and trace style logs) and up from 1 (for chattier
info-type logs.)
#### How do I choose my keys?
Keys are fairly flexible, and can hold more or less any string
value. For best compatibility with implementations and consistency
with existing code in other projects, there are a few conventions you
should consider.
- Make your keys human-readable.
- Constant keys are generally a good idea.
- Be consistent across your codebase.
- Keys should naturally match parts of the message string.
- Use lower case for simple keys and
[lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for
more complex ones. Kubernetes is one example of a project that has
[adopted that
convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments).
While key names are mostly unrestricted (and spaces are acceptable),
it's generally a good idea to stick to printable ascii characters, or at
least match the general character set of your log lines.
#### Why should keys be constant values?
The point of structured logging is to make later log processing easier. Your
keys are, effectively, the schema of each log message. If you use different
keys across instances of the same log line, you will make your structured logs
much harder to use. `Sprintf()` is for values, not for keys!
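A tiny runnable sketch of constant keys, using the `funcr` implementation (vendored later in this commit); names like `podName` are just examples:

```
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	logger := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{})

	name, ns := "web-1", "prod"
	// Every instance of this line shares the same schema: podName, namespace.
	logger.Info("pod updated", "podName", name, "namespace", ns)
}
```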
#### Why is this not a pure interface?
The Logger type is implemented as a struct in order to allow the Go compiler to
optimize things like high-V `Info` logs that are not triggered. Not all of
these implementations are implemented yet, but this structure was suggested as
a way to ensure they *can* be implemented. All of the real work is behind the
`LogSink` interface.
[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging

54
vendor/github.com/go-logr/logr/discard.go generated vendored Normal file
View File

@ -0,0 +1,54 @@
/*
Copyright 2020 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
// Discard returns a Logger that discards all messages logged to it. It can be
// used whenever the caller is not interested in the logs. Logger instances
// produced by this function always compare as equal.
func Discard() Logger {
return Logger{
level: 0,
sink: discardLogSink{},
}
}
// discardLogSink is a LogSink that discards all messages.
type discardLogSink struct{}
// Verify that it actually implements the interface
var _ LogSink = discardLogSink{}
func (l discardLogSink) Init(RuntimeInfo) {
}
func (l discardLogSink) Enabled(int) bool {
return false
}
func (l discardLogSink) Info(int, string, ...interface{}) {
}
func (l discardLogSink) Error(error, string, ...interface{}) {
}
func (l discardLogSink) WithValues(...interface{}) LogSink {
return l
}
func (l discardLogSink) WithName(string) LogSink {
return l
}
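As a usage note (not part of the vendored file): `Discard()` is handy wherever an API demands a `logr.Logger` but the caller wants silence. A minimal sketch:

```go
package main

import "github.com/go-logr/logr"

func process(log logr.Logger) {
	log.Info("this message is dropped")
}

func main() {
	process(logr.Discard())
}
```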

787
vendor/github.com/go-logr/logr/funcr/funcr.go generated vendored Normal file
View File

@ -0,0 +1,787 @@
/*
Copyright 2021 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package funcr implements formatting of structured log messages and
// optionally captures the call site and timestamp.
//
// The simplest way to use it is via its implementation of a
// github.com/go-logr/logr.LogSink with output through an arbitrary
// "write" function. See New and NewJSON for details.
//
// Custom LogSinks
//
// For users who need more control, a funcr.Formatter can be embedded inside
// your own custom LogSink implementation. This is useful when the LogSink
// needs to implement additional methods, for example.
//
// Formatting
//
// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
// values which are being logged. When rendering a struct, funcr will use Go's
// standard JSON tags (all except "string").
package funcr
import (
"bytes"
"encoding"
"fmt"
"path/filepath"
"reflect"
"runtime"
"strconv"
"strings"
"time"
"github.com/go-logr/logr"
)
// New returns a logr.Logger which is implemented by an arbitrary function.
func New(fn func(prefix, args string), opts Options) logr.Logger {
return logr.New(newSink(fn, NewFormatter(opts)))
}
// NewJSON returns a logr.Logger which is implemented by an arbitrary function
// and produces JSON output.
func NewJSON(fn func(obj string), opts Options) logr.Logger {
fnWrapper := func(_, obj string) {
fn(obj)
}
return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
}
// Underlier exposes access to the underlying logging function. Since
// callers only have a logr.Logger, they have to know which
// implementation is in use, so this interface is less of an
// abstraction and more of a way to test type conversion.
type Underlier interface {
GetUnderlying() func(prefix, args string)
}
func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
l := &fnlogger{
Formatter: formatter,
write: fn,
}
// For skipping fnlogger.Info and fnlogger.Error.
l.Formatter.AddCallDepth(1)
return l
}
// Options carries parameters which influence the way logs are generated.
type Options struct {
// LogCaller tells funcr to add a "caller" key to some or all log lines.
// This has some overhead, so some users might not want it.
LogCaller MessageClass
// LogCallerFunc tells funcr to also log the calling function name. This
// has no effect if caller logging is not enabled (see Options.LogCaller).
LogCallerFunc bool
// LogTimestamp tells funcr to add a "ts" key to log lines. This has some
// overhead, so some users might not want it.
LogTimestamp bool
// TimestampFormat tells funcr how to render timestamps when LogTimestamp
// is enabled. If not specified, a default format will be used. For more
// details, see docs for Go's time.Layout.
TimestampFormat string
// Verbosity tells funcr which V logs to produce. Higher values enable
// more logs. Info logs at or below this level will be written, while logs
// above this level will be discarded.
Verbosity int
// RenderBuiltinsHook allows users to mutate the list of key-value pairs
// while a log line is being rendered. The kvList argument follows logr
// conventions - each pair of slice elements is comprised of a string key
// and an arbitrary value (verified and sanitized before calling this
// hook). The value returned must follow the same conventions. This hook
// can be used to audit or modify logged data. For example, you might want
// to prefix all of funcr's built-in keys with some string. This hook is
// only called for built-in (provided by funcr itself) key-value pairs.
// Equivalent hooks are offered for key-value pairs saved via
// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
// for user-provided pairs (see RenderArgsHook).
RenderBuiltinsHook func(kvList []interface{}) []interface{}
// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
// only called for key-value pairs saved via logr.Logger.WithValues. See
// RenderBuiltinsHook for more details.
RenderValuesHook func(kvList []interface{}) []interface{}
// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
// called for key-value pairs passed directly to Info and Error. See
// RenderBuiltinsHook for more details.
RenderArgsHook func(kvList []interface{}) []interface{}
// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
// that contains a struct, etc.) it may log. Every time it finds a struct,
// slice, array, or map the depth is increased by one. When the maximum is
// reached, the value will be converted to a string indicating that the max
// depth has been exceeded. If this field is not specified, a default
// value will be used.
MaxLogDepth int
}
// MessageClass indicates which category or categories of messages to consider.
type MessageClass int
const (
// None ignores all message classes.
None MessageClass = iota
// All considers all message classes.
All
// Info only considers info messages.
Info
// Error only considers error messages.
Error
)
// fnlogger inherits some of its LogSink implementation from Formatter
// and just needs to add some glue code.
type fnlogger struct {
Formatter
write func(prefix, args string)
}
func (l fnlogger) WithName(name string) logr.LogSink {
l.Formatter.AddName(name)
return &l
}
func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink {
l.Formatter.AddValues(kvList)
return &l
}
func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
l.Formatter.AddCallDepth(depth)
return &l
}
func (l fnlogger) Info(level int, msg string, kvList ...interface{}) {
prefix, args := l.FormatInfo(level, msg, kvList)
l.write(prefix, args)
}
func (l fnlogger) Error(err error, msg string, kvList ...interface{}) {
prefix, args := l.FormatError(err, msg, kvList)
l.write(prefix, args)
}
func (l fnlogger) GetUnderlying() func(prefix, args string) {
return l.write
}
// Assert conformance to the interfaces.
var _ logr.LogSink = &fnlogger{}
var _ logr.CallDepthLogSink = &fnlogger{}
var _ Underlier = &fnlogger{}
// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
func NewFormatter(opts Options) Formatter {
return newFormatter(opts, outputKeyValue)
}
// NewFormatterJSON constructs a Formatter which emits strict JSON.
func NewFormatterJSON(opts Options) Formatter {
return newFormatter(opts, outputJSON)
}
// Defaults for Options.
const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
const defaultMaxLogDepth = 16
func newFormatter(opts Options, outfmt outputFormat) Formatter {
if opts.TimestampFormat == "" {
opts.TimestampFormat = defaultTimestampFormat
}
if opts.MaxLogDepth == 0 {
opts.MaxLogDepth = defaultMaxLogDepth
}
f := Formatter{
outputFormat: outfmt,
prefix: "",
values: nil,
depth: 0,
opts: opts,
}
return f
}
// Formatter is an opaque struct which can be embedded in a LogSink
// implementation. It should be constructed with NewFormatter. Some of
// its methods directly implement logr.LogSink.
type Formatter struct {
outputFormat outputFormat
prefix string
values []interface{}
valuesStr string
depth int
opts Options
}
// outputFormat indicates which outputFormat to use.
type outputFormat int
const (
// outputKeyValue emits a JSON-like key=value format, but not strict JSON.
outputKeyValue outputFormat = iota
// outputJSON emits strict JSON.
outputJSON
)
// PseudoStruct is a list of key-value pairs that gets logged as a struct.
type PseudoStruct []interface{}
// render produces a log line, ready to use.
func (f Formatter) render(builtins, args []interface{}) string {
// Empirically bytes.Buffer is faster than strings.Builder for this.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
if f.outputFormat == outputJSON {
buf.WriteByte('{')
}
vals := builtins
if hook := f.opts.RenderBuiltinsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
continuing := len(builtins) > 0
if len(f.valuesStr) > 0 {
if continuing {
if f.outputFormat == outputJSON {
buf.WriteByte(',')
} else {
buf.WriteByte(' ')
}
}
continuing = true
buf.WriteString(f.valuesStr)
}
vals = args
if hook := f.opts.RenderArgsHook; hook != nil {
vals = hook(f.sanitize(vals))
}
f.flatten(buf, vals, continuing, true) // escape user-provided keys
if f.outputFormat == outputJSON {
buf.WriteByte('}')
}
return buf.String()
}
// flatten renders a list of key-value pairs into a buffer. If continuing is
// true, it assumes that the buffer has previous values and will emit a
// separator (which depends on the output format) before the first pair it
// writes. If escapeKeys is true, the keys are assumed to have
// non-JSON-compatible characters in them and must be evaluated for escapes.
//
// This function returns a potentially modified version of kvList, which
// ensures that there is a value for every key (adding a value if needed) and
// that each key is a string (substituting a key if needed).
func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
// This logic overlaps with sanitize() but saves one type-cast per key,
// which can be measurable.
if len(kvList)%2 != 0 {
kvList = append(kvList, noValue)
}
for i := 0; i < len(kvList); i += 2 {
k, ok := kvList[i].(string)
if !ok {
k = f.nonStringKey(kvList[i])
kvList[i] = k
}
v := kvList[i+1]
if i > 0 || continuing {
if f.outputFormat == outputJSON {
buf.WriteByte(',')
} else {
// In theory the format could be something we don't understand. In
// practice, we control it, so it won't be.
buf.WriteByte(' ')
}
}
if escapeKeys {
buf.WriteString(prettyString(k))
} else {
// this is faster
buf.WriteByte('"')
buf.WriteString(k)
buf.WriteByte('"')
}
if f.outputFormat == outputJSON {
buf.WriteByte(':')
} else {
buf.WriteByte('=')
}
buf.WriteString(f.pretty(v))
}
return kvList
}
func (f Formatter) pretty(value interface{}) string {
return f.prettyWithFlags(value, 0, 0)
}
const (
flagRawStruct = 0x1 // do not print braces on structs
)
// TODO: This is not fast. Most of the overhead goes here.
func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
if depth > f.opts.MaxLogDepth {
return `"<max-log-depth-exceeded>"`
}
// Handle types that take full control of logging.
if v, ok := value.(logr.Marshaler); ok {
// Replace the value with what the type wants to get logged.
// That then gets handled below via reflection.
value = invokeMarshaler(v)
}
// Handle types that want to format themselves.
switch v := value.(type) {
case fmt.Stringer:
value = invokeStringer(v)
case error:
value = invokeError(v)
}
// Handling the most common types without reflect is a small perf win.
switch v := value.(type) {
case bool:
return strconv.FormatBool(v)
case string:
return prettyString(v)
case int:
return strconv.FormatInt(int64(v), 10)
case int8:
return strconv.FormatInt(int64(v), 10)
case int16:
return strconv.FormatInt(int64(v), 10)
case int32:
return strconv.FormatInt(int64(v), 10)
case int64:
return strconv.FormatInt(int64(v), 10)
case uint:
return strconv.FormatUint(uint64(v), 10)
case uint8:
return strconv.FormatUint(uint64(v), 10)
case uint16:
return strconv.FormatUint(uint64(v), 10)
case uint32:
return strconv.FormatUint(uint64(v), 10)
case uint64:
return strconv.FormatUint(v, 10)
case uintptr:
return strconv.FormatUint(uint64(v), 10)
case float32:
return strconv.FormatFloat(float64(v), 'f', -1, 32)
case float64:
return strconv.FormatFloat(v, 'f', -1, 64)
case complex64:
return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
case complex128:
return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
case PseudoStruct:
buf := bytes.NewBuffer(make([]byte, 0, 1024))
v = f.sanitize(v)
if flags&flagRawStruct == 0 {
buf.WriteByte('{')
}
for i := 0; i < len(v); i += 2 {
if i > 0 {
buf.WriteByte(',')
}
k, _ := v[i].(string) // sanitize() above means no need to check success
// arbitrary keys might need escaping
buf.WriteString(prettyString(k))
buf.WriteByte(':')
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
}
if flags&flagRawStruct == 0 {
buf.WriteByte('}')
}
return buf.String()
}
buf := bytes.NewBuffer(make([]byte, 0, 256))
t := reflect.TypeOf(value)
if t == nil {
return "null"
}
v := reflect.ValueOf(value)
switch t.Kind() {
case reflect.Bool:
return strconv.FormatBool(v.Bool())
case reflect.String:
return prettyString(v.String())
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return strconv.FormatInt(int64(v.Int()), 10)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return strconv.FormatUint(uint64(v.Uint()), 10)
case reflect.Float32:
return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
case reflect.Float64:
return strconv.FormatFloat(v.Float(), 'f', -1, 64)
case reflect.Complex64:
return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
case reflect.Complex128:
return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
case reflect.Struct:
if flags&flagRawStruct == 0 {
buf.WriteByte('{')
}
for i := 0; i < t.NumField(); i++ {
fld := t.Field(i)
if fld.PkgPath != "" {
// reflect says this field is only defined for non-exported fields.
continue
}
if !v.Field(i).CanInterface() {
// reflect isn't clear exactly what this means, but we can't use it.
continue
}
name := ""
omitempty := false
if tag, found := fld.Tag.Lookup("json"); found {
if tag == "-" {
continue
}
if comma := strings.Index(tag, ","); comma != -1 {
if n := tag[:comma]; n != "" {
name = n
}
rest := tag[comma:]
if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
omitempty = true
}
} else {
name = tag
}
}
if omitempty && isEmpty(v.Field(i)) {
continue
}
if i > 0 {
buf.WriteByte(',')
}
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
continue
}
if name == "" {
name = fld.Name
}
// field names can't contain characters which need escaping
buf.WriteByte('"')
buf.WriteString(name)
buf.WriteByte('"')
buf.WriteByte(':')
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
}
if flags&flagRawStruct == 0 {
buf.WriteByte('}')
}
return buf.String()
case reflect.Slice, reflect.Array:
buf.WriteByte('[')
for i := 0; i < v.Len(); i++ {
if i > 0 {
buf.WriteByte(',')
}
e := v.Index(i)
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
}
buf.WriteByte(']')
return buf.String()
case reflect.Map:
buf.WriteByte('{')
// This does not sort the map keys, for best perf.
it := v.MapRange()
i := 0
for it.Next() {
if i > 0 {
buf.WriteByte(',')
}
// If a map key supports TextMarshaler, use it.
keystr := ""
if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
txt, err := m.MarshalText()
if err != nil {
keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
} else {
keystr = string(txt)
}
keystr = prettyString(keystr)
} else {
// prettyWithFlags will produce already-escaped values
keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
if t.Key().Kind() != reflect.String {
// JSON only does string keys. Unlike Go's standard JSON, we'll
// convert just about anything to a string.
keystr = prettyString(keystr)
}
}
buf.WriteString(keystr)
buf.WriteByte(':')
buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
i++
}
buf.WriteByte('}')
return buf.String()
case reflect.Ptr, reflect.Interface:
if v.IsNil() {
return "null"
}
return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
}
return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
}
func prettyString(s string) string {
// Avoid escaping (which does allocations) if we can.
if needsEscape(s) {
return strconv.Quote(s)
}
b := bytes.NewBuffer(make([]byte, 0, 1024))
b.WriteByte('"')
b.WriteString(s)
b.WriteByte('"')
return b.String()
}
// needsEscape determines whether the input string needs to be escaped or not,
// without doing any allocations.
func needsEscape(s string) bool {
for _, r := range s {
if !strconv.IsPrint(r) || r == '\\' || r == '"' {
return true
}
}
return false
}
func isEmpty(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Complex64, reflect.Complex128:
return v.Complex() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
defer func() {
if r := recover(); r != nil {
ret = fmt.Sprintf("<panic: %s>", r)
}
}()
return m.MarshalLog()
}
func invokeStringer(s fmt.Stringer) (ret string) {
defer func() {
if r := recover(); r != nil {
ret = fmt.Sprintf("<panic: %s>", r)
}
}()
return s.String()
}
func invokeError(e error) (ret string) {
defer func() {
if r := recover(); r != nil {
ret = fmt.Sprintf("<panic: %s>", r)
}
}()
return e.Error()
}
// Caller represents the original call site for a log line, after considering
// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
// Line fields will always be provided, while the Func field is optional.
// Users can set the render hook fields in Options to examine logged key-value
// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
// field is enabled for the given MessageClass.
type Caller struct {
// File is the basename of the file for this call site.
File string `json:"file"`
// Line is the line number in the file for this call site.
Line int `json:"line"`
// Func is the function name for this call site, or empty if
// Options.LogCallerFunc is not enabled.
Func string `json:"function,omitempty"`
}
func (f Formatter) caller() Caller {
// +1 for this frame, +1 for Info/Error.
pc, file, line, ok := runtime.Caller(f.depth + 2)
if !ok {
return Caller{"<unknown>", 0, ""}
}
fn := ""
if f.opts.LogCallerFunc {
if fp := runtime.FuncForPC(pc); fp != nil {
fn = fp.Name()
}
}
return Caller{filepath.Base(file), line, fn}
}
const noValue = "<no-value>"
func (f Formatter) nonStringKey(v interface{}) string {
return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
}
// snippet produces a short snippet string of an arbitrary value.
func (f Formatter) snippet(v interface{}) string {
const snipLen = 16
snip := f.pretty(v)
if len(snip) > snipLen {
snip = snip[:snipLen]
}
return snip
}
// sanitize ensures that a list of key-value pairs has a value for every key
// (adding a value if needed) and that each key is a string (substituting a key
// if needed).
func (f Formatter) sanitize(kvList []interface{}) []interface{} {
if len(kvList)%2 != 0 {
kvList = append(kvList, noValue)
}
for i := 0; i < len(kvList); i += 2 {
_, ok := kvList[i].(string)
if !ok {
kvList[i] = f.nonStringKey(kvList[i])
}
}
return kvList
}
// Init configures this Formatter from runtime info, such as the call depth
// imposed by logr itself.
// Note that this receiver is a pointer, so depth can be saved.
func (f *Formatter) Init(info logr.RuntimeInfo) {
f.depth += info.CallDepth
}
// Enabled checks whether an info message at the given level should be logged.
func (f Formatter) Enabled(level int) bool {
return level <= f.opts.Verbosity
}
// GetDepth returns the current depth of this Formatter. This is useful for
// implementations which do their own caller attribution.
func (f Formatter) GetDepth() int {
return f.depth
}
// FormatInfo renders an Info log message into strings. The prefix will be
// empty when no names were set (via AddName), or when the output is
// configured for JSON.
func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) {
args := make([]interface{}, 0, 64) // using a constant here impacts perf
prefix = f.prefix
if f.outputFormat == outputJSON {
args = append(args, "logger", prefix)
prefix = ""
}
if f.opts.LogTimestamp {
args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
}
if policy := f.opts.LogCaller; policy == All || policy == Info {
args = append(args, "caller", f.caller())
}
args = append(args, "level", level, "msg", msg)
return prefix, f.render(args, kvList)
}
// FormatError renders an Error log message into strings. The prefix will be
// empty when no names were set (via AddName), or when the output is
// configured for JSON.
func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) {
args := make([]interface{}, 0, 64) // using a constant here impacts perf
prefix = f.prefix
if f.outputFormat == outputJSON {
args = append(args, "logger", prefix)
prefix = ""
}
if f.opts.LogTimestamp {
args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
}
if policy := f.opts.LogCaller; policy == All || policy == Error {
args = append(args, "caller", f.caller())
}
args = append(args, "msg", msg)
var loggableErr interface{}
if err != nil {
loggableErr = err.Error()
}
args = append(args, "error", loggableErr)
return prefix, f.render(args, kvList)
}
// AddName appends the specified name. funcr uses '/' characters to separate
// name elements. Callers should not pass '/' in the provided name string, but
// this library does not actually enforce that.
func (f *Formatter) AddName(name string) {
if len(f.prefix) > 0 {
f.prefix += "/"
}
f.prefix += name
}
// AddValues adds key-value pairs to the set of saved values to be logged with
// each log line.
func (f *Formatter) AddValues(kvList []interface{}) {
// Three slice args force a copy.
n := len(f.values)
f.values = append(f.values[:n:n], kvList...)
vals := f.values
if hook := f.opts.RenderValuesHook; hook != nil {
vals = hook(f.sanitize(vals))
}
// Pre-render values, so we don't have to do it on each Info/Error call.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
f.flatten(buf, vals, false, true) // escape user-provided keys
f.valuesStr = buf.String()
}
// AddCallDepth increases the number of stack-frames to skip when attributing
// the log line to a file and line.
func (f *Formatter) AddCallDepth(depth int) {
f.depth += depth
}
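For context, a short usage sketch of the file above; `funcr.New` and `funcr.NewJSON` are as documented, while the option values and messages are invented:

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// Key=value output through an arbitrary write function.
	logger := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{
		LogTimestamp: true, // adds a "ts" built-in key
		Verbosity:    1,    // V(0) and V(1) are written, higher V is dropped
	})
	logger.WithName("db").V(1).Info("cache warmed", "entries", 128)

	// Strict JSON output via NewJSON; nil is a valid error argument.
	jlogger := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})
	jlogger.Error(nil, "shutting down", "reason", "signal")
}
```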

510
vendor/github.com/go-logr/logr/logr.go generated vendored Normal file
View File

@ -0,0 +1,510 @@
/*
Copyright 2019 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This design derives from Dave Cheney's blog:
// http://dave.cheney.net/2015/11/05/lets-talk-about-logging
// Package logr defines a general-purpose logging API and abstract interfaces
// to back that API. Packages in the Go ecosystem can depend on this package,
// while callers can implement logging with whatever backend is appropriate.
//
// Usage
//
// Logging is done using a Logger instance. Logger is a concrete type with
// methods, which defers the actual logging to a LogSink interface. The main
// methods of Logger are Info() and Error(). Arguments to Info() and Error()
// are key/value pairs rather than printf-style formatted strings, emphasizing
// "structured logging".
//
// With Go's standard log package, we might write:
// log.Printf("setting target value %s", targetValue)
//
// With logr's structured logging, we'd write:
// logger.Info("setting target", "value", targetValue)
//
// Errors are much the same. Instead of:
// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
//
// We'd write:
// logger.Error(err, "failed to open the pod bay door", "user", user)
//
// Info() and Error() are very similar, but they are separate methods so that
// LogSink implementations can choose to do things like attach additional
// information (such as stack traces) on calls to Error(). Error() messages are
// always logged, regardless of the current verbosity. If there is no error
// instance available, passing nil is valid.
//
// Verbosity
//
// Often we want to log information only when the application is in "verbose
// mode". To write log lines that are more verbose, Logger has a V() method.
// The higher the V-level of a log line, the less critical it is considered.
// Log-lines with V-levels that are not enabled (as per the LogSink) will not
// be written. Level V(0) is the default, and logger.V(0).Info() has the same
// meaning as logger.Info(). Negative V-levels have the same meaning as V(0).
// Error messages do not have a verbosity level and are always logged.
//
// Where we might have written:
// if flVerbose >= 2 {
// log.Printf("an unusual thing happened")
// }
//
// We can write:
// logger.V(2).Info("an unusual thing happened")
//
// Logger Names
//
// Logger instances can have name strings so that all messages logged through
// that instance have additional context. For example, you might want to add
// a subsystem name:
//
// logger.WithName("compactor").Info("started", "time", time.Now())
//
// The WithName() method returns a new Logger, which can be passed to
// constructors or other functions for further use. Repeated use of WithName()
// will accumulate name "segments". These name segments will be joined in some
// way by the LogSink implementation. It is strongly recommended that name
// segments contain simple identifiers (letters, digits, and hyphen), and do
// not contain characters that could muddle the log output or confuse the
// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
// quotes, etc).
//
// Saved Values
//
// Logger instances can store any number of key/value pairs, which will be
// logged alongside all messages logged through that instance. For example,
// you might want to create a Logger instance per managed object:
//
// With the standard log package, we might write:
// log.Printf("decided to set field foo to value %q for object %s/%s",
// targetValue, object.Namespace, object.Name)
//
// With logr we'd write:
// // Elsewhere: set up the logger to log the object name.
// obj.logger = mainLogger.WithValues(
// "name", obj.name, "namespace", obj.namespace)
//
// // later on...
// obj.logger.Info("setting foo", "value", targetValue)
//
// Best Practices
//
// Logger has very few hard rules, with the goal that LogSink implementations
// might have a lot of freedom to differentiate. There are, however, some
// things to consider.
//
// The log message consists of a constant message attached to the log line.
// This should generally be a simple description of what's occurring, and should
// never be a format string. Variable information can then be attached using
// named values.
//
// Keys are arbitrary strings, but should generally be constant values. Values
// may be any Go value, but how the value is formatted is determined by the
// LogSink implementation.
//
// Logger instances are meant to be passed around by value. Code that receives
// such a value can call its methods without having to check whether the
// instance is ready for use.
//
// Calling methods on the null logger (Logger{}) will crash
// because it has no LogSink. Therefore this null logger should never be passed
// around. For cases where passing a logger is optional, a pointer to Logger
// should be used.
//
// Key Naming Conventions
//
// Keys are not strictly required to conform to any specification or regex, but
// it is recommended that they:
// * be human-readable and meaningful (not auto-generated or simple ordinals)
// * be constant (not dependent on input data)
// * contain only printable characters
// * not contain whitespace or punctuation
// * use lower case for simple keys and lowerCamelCase for more complex ones
//
// These guidelines help ensure that log data is processed properly regardless
// of the log implementation. For example, log implementations will try to
// output JSON data or will store data for later database (e.g. SQL) queries.
//
// While users are generally free to use key names of their choice, it's
// generally best to avoid using the following keys, as they're frequently used
// by implementations:
// * "caller": the calling information (file/line) of a particular log line
// * "error": the underlying error value in the `Error` method
// * "level": the log level
// * "logger": the name of the associated logger
// * "msg": the log message
// * "stacktrace": the stack trace associated with a particular log line or
// error (often from the `Error` message)
// * "ts": the timestamp for a log line
//
// Implementations are encouraged to make use of these keys to represent the
// above concepts, when necessary (for example, in a pure-JSON output form, it
// would be necessary to represent at least message and timestamp as ordinary
// named values).
//
// Break Glass
//
// Implementations may choose to give callers access to the underlying
// logging implementation. The recommended pattern for this is:
// // Underlier exposes access to the underlying logging implementation.
// // Since callers only have a logr.Logger, they have to know which
// // implementation is in use, so this interface is less of an abstraction
// // and more of a way to test type conversion.
// type Underlier interface {
// GetUnderlying() <underlying-type>
// }
//
// Logger grants access to the sink to enable type assertions like this:
// func DoSomethingWithImpl(log logr.Logger) {
// if underlier, ok := log.GetSink().(impl.Underlier); ok {
// implLogger := underlier.GetUnderlying()
// ...
// }
// }
//
// Custom `With*` functions can be implemented by copying the complete
// Logger struct and replacing the sink in the copy:
// // WithFooBar changes the foobar parameter in the log sink and returns a
// // new logger with that modified sink. It does nothing for loggers where
// // the sink doesn't support that parameter.
// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
// }
// return log
// }
//
// Don't use New to construct a new Logger with a LogSink retrieved from an
// existing Logger. Source code attribution might not work correctly and
// unexported fields in Logger get lost.
//
// Beware that the same LogSink instance may be shared by different logger
// instances. Calling functions that modify the LogSink will affect all of
// those.
package logr
import (
"context"
)
// New returns a new Logger instance. This is primarily used by libraries
// implementing LogSink, rather than end users.
func New(sink LogSink) Logger {
logger := Logger{}
logger.setSink(sink)
sink.Init(runtimeInfo)
return logger
}
// setSink stores the sink and updates any related fields. It mutates the
// logger and thus is only safe to use for loggers that are not currently being
// used concurrently.
func (l *Logger) setSink(sink LogSink) {
l.sink = sink
}
// GetSink returns the stored sink.
func (l Logger) GetSink() LogSink {
return l.sink
}
// WithSink returns a copy of the logger with the new sink.
func (l Logger) WithSink(sink LogSink) Logger {
l.setSink(sink)
return l
}
// Logger is an interface to an abstract logging implementation. This is a
// concrete type for performance reasons, but all the real work is passed on to
// a LogSink. Implementations of LogSink should provide their own constructors
// that return Logger, not LogSink.
//
// The underlying sink can be accessed through GetSink and be modified through
// WithSink. This enables the implementation of custom extensions (see "Break
// Glass" in the package documentation). Normally the sink should be used only
// indirectly.
type Logger struct {
sink LogSink
level int
}
// Enabled tests whether this Logger is enabled. For example, commandline
// flags might be used to set the logging verbosity and disable some info logs.
func (l Logger) Enabled() bool {
return l.sink.Enabled(l.level)
}
// Info logs a non-error message with the given key/value pairs as context.
//
// The msg argument should be used to add some constant description to the log
// line. The key/value pairs can then be used to add additional variable
// information. The key/value pairs must alternate string keys and arbitrary
// values.
func (l Logger) Info(msg string, keysAndValues ...interface{}) {
if l.Enabled() {
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
withHelper.GetCallStackHelper()()
}
l.sink.Info(l.level, msg, keysAndValues...)
}
}
// Error logs an error, with the given message and key/value pairs as context.
// It functions similarly to Info, but may have unique behavior, and should be
// preferred for logging errors (see the package documentations for more
// information). The log message will always be emitted, regardless of
// verbosity level.
//
// The msg argument should be used to add context to any underlying error,
// while the err argument should be used to attach the actual error that
// triggered this log line, if present. The err parameter is optional
// and nil may be passed instead of an error instance.
func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
withHelper.GetCallStackHelper()()
}
l.sink.Error(err, msg, keysAndValues...)
}
// V returns a new Logger instance for a specific verbosity level, relative to
// this Logger. In other words, V-levels are additive. A higher verbosity
// level means a log message is less important. Negative V-levels are treated
// as 0.
func (l Logger) V(level int) Logger {
if level < 0 {
level = 0
}
l.level += level
return l
}
// WithValues returns a new Logger instance with additional key/value pairs.
// See Info for documentation on how key/value pairs work.
func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
l.setSink(l.sink.WithValues(keysAndValues...))
return l
}
// WithName returns a new Logger instance with the specified name element added
// to the Logger's name. Successive calls to WithName append additional
// suffixes to the Logger's name. It's strongly recommended that name segments
// contain only letters, digits, and hyphens (see the package documentation for
// more information).
func (l Logger) WithName(name string) Logger {
l.setSink(l.sink.WithName(name))
return l
}
// WithCallDepth returns a Logger instance that offsets the call stack by the
// specified number of frames when logging call site information, if possible.
// This is useful for users who have helper functions between the "real" call
// site and the actual calls to Logger methods. If depth is 0 the attribution
// should be to the direct caller of this function. If depth is 1 the
// attribution should skip 1 call frame, and so on. Successive calls to this
// are additive.
//
// If the underlying log implementation supports a WithCallDepth(int) method,
// it will be called and the result returned. If the implementation does not
// support CallDepthLogSink, the original Logger will be returned.
//
// To skip one level, WithCallStackHelper() should be used instead of
// WithCallDepth(1) because it works with implementations that support the
// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
func (l Logger) WithCallDepth(depth int) Logger {
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
l.setSink(withCallDepth.WithCallDepth(depth))
}
return l
}
// WithCallStackHelper returns a new Logger instance that skips the direct
// caller when logging call site information, if possible. This is useful for
// users who have helper functions between the "real" call site and the actual
// calls to Logger methods and want to support loggers which depend on marking
// each individual helper function, like loggers based on testing.T.
//
// In addition to using that new logger instance, callers also must call the
// returned function.
//
// If the underlying log implementation supports a WithCallDepth(int) method,
// WithCallDepth(1) will be called to produce a new logger. If it supports a
// WithCallStackHelper() method, that will be also called. If the
// implementation does not support either of these, the original Logger will be
// returned.
func (l Logger) WithCallStackHelper() (func(), Logger) {
var helper func()
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
l.setSink(withCallDepth.WithCallDepth(1))
}
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
helper = withHelper.GetCallStackHelper()
} else {
helper = func() {}
}
return helper, l
}
// contextKey is how we find Loggers in a context.Context.
type contextKey struct{}
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v, nil
}
return Logger{}, notFoundError{}
}
// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}
func (notFoundError) Error() string {
return "no logr.Logger was present"
}
func (notFoundError) IsNotFound() bool {
return true
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}
// RuntimeInfo holds information that the logr "core" library knows which
// LogSinks might want to know.
type RuntimeInfo struct {
// CallDepth is the number of call frames the logr library adds between the
// end-user and the LogSink. LogSink implementations which choose to print
// the original logging site (e.g. file & line) should climb this many
// additional frames to find it.
CallDepth int
}
// runtimeInfo is a static global. It must not be changed at run time.
var runtimeInfo = RuntimeInfo{
CallDepth: 1,
}
// LogSink represents a logging implementation. End-users will generally not
// interact with this type.
type LogSink interface {
// Init receives optional information about the logr library for LogSink
// implementations that need it.
Init(info RuntimeInfo)
// Enabled tests whether this LogSink is enabled at the specified V-level.
// For example, commandline flags might be used to set the logging
// verbosity and disable some info logs.
Enabled(level int) bool
// Info logs a non-error message with the given key/value pairs as context.
// The level argument is provided for optional logging. This method will
// only be called when Enabled(level) is true. See Logger.Info for more
// details.
Info(level int, msg string, keysAndValues ...interface{})
// Error logs an error, with the given message and key/value pairs as
// context. See Logger.Error for more details.
Error(err error, msg string, keysAndValues ...interface{})
// WithValues returns a new LogSink with additional key/value pairs. See
// Logger.WithValues for more details.
WithValues(keysAndValues ...interface{}) LogSink
// WithName returns a new LogSink with the specified name appended. See
// Logger.WithName for more details.
WithName(name string) LogSink
}
// CallDepthLogSink represents a Logger that knows how to climb the call stack
// to identify the original call site and can offset the depth by a specified
// number of frames. This is useful for users who have helper functions
// between the "real" call site and the actual calls to Logger methods.
// Implementations that log information about the call site (such as file,
// function, or line) would otherwise log information about the intermediate
// helper functions.
//
// This is an optional interface and implementations are not required to
// support it.
type CallDepthLogSink interface {
// WithCallDepth returns a LogSink that will offset the call
// stack by the specified number of frames when logging call
// site information.
//
// If depth is 0, the LogSink should skip exactly the number
// of call frames defined in RuntimeInfo.CallDepth when Info
// or Error are called, i.e. the attribution should be to the
// direct caller of Logger.Info or Logger.Error.
//
// If depth is 1 the attribution should skip 1 call frame, and so on.
// Successive calls to this are additive.
WithCallDepth(depth int) LogSink
}
// CallStackHelperLogSink represents a Logger that knows how to climb
// the call stack to identify the original call site and can skip
// intermediate helper functions if they mark themselves as
// helper. Go's testing package uses that approach.
//
// This is useful for users who have helper functions between the
// "real" call site and the actual calls to Logger methods.
// Implementations that log information about the call site (such as
// file, function, or line) would otherwise log information about the
// intermediate helper functions.
//
// This is an optional interface and implementations are not required
// to support it. Implementations that choose to support this must not
// simply implement it as WithCallDepth(1), because
// Logger.WithCallStackHelper will call both methods if they are
// present. This should only be implemented for LogSinks that actually
// need it, as with testing.T.
type CallStackHelperLogSink interface {
// GetCallStackHelper returns a function that must be called
// to mark the direct caller as helper function when logging
// call site information.
GetCallStackHelper() func()
}
// Marshaler is an optional interface that logged values may choose to
// implement. Loggers with structured output, such as JSON, should
// log the object returned by the MarshalLog method instead of the
// original value.
type Marshaler interface {
// MarshalLog can be used to:
// - ensure that structs are not logged as strings when the original
// value has a String method: return a different type without a
// String method
// - select which fields of a complex type should get logged:
// return a simpler struct with fewer fields
// - log unexported fields: return a different struct
// with exported fields
//
// It may return any value of any type.
MarshalLog() interface{}
}
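For context, a short sketch of the context helpers defined above (`work` and the funcr sink are invented for the example):

```go
package main

import (
	"context"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func work(ctx context.Context) {
	// FromContextOrDiscard never fails: without a Logger it returns Discard().
	logger := logr.FromContextOrDiscard(ctx)
	logger.Info("work started")
}

func main() {
	logger := funcr.New(func(prefix, args string) { println(prefix, args) }, funcr.Options{})
	ctx := logr.NewContext(context.Background(), logger.WithName("worker"))
	work(ctx)
}
```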

201
vendor/github.com/go-logr/stdr/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

6
vendor/github.com/go-logr/stdr/README.md generated vendored Normal file
View File

@ -0,0 +1,6 @@
# Minimal Go logging using logr and Go's standard library
[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr)
This package implements the [logr interface](https://github.com/go-logr/logr)
in terms of Go's [standard log package](https://pkg.go.dev/log).
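A brief, hypothetical usage sketch (the flags are ordinary `log` package options, picked for illustration):

```go
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
)

func main() {
	stdr.SetVerbosity(2) // global: V(0) through V(2) will be written

	logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
	logger = logger.WithName("example")

	logger.Info("starting", "pid", os.Getpid())
	logger.V(3).Info("too verbose for the current setting") // discarded
}
```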

170
vendor/github.com/go-logr/stdr/stdr.go generated vendored Normal file
View File

@ -0,0 +1,170 @@
/*
Copyright 2019 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package stdr implements github.com/go-logr/logr.Logger in terms of
// Go's standard log package.
package stdr
import (
"log"
"os"
"github.com/go-logr/logr"
"github.com/go-logr/logr/funcr"
)
// The global verbosity level. See SetVerbosity().
var globalVerbosity int
// SetVerbosity sets the global level against which all info logs will be
// compared. If this is greater than or equal to the "V" of the logger, the
// message will be logged. A higher value here means more logs will be written.
// The previous verbosity value is returned. This is not concurrent-safe -
// callers must be sure to call it from only one goroutine.
func SetVerbosity(v int) int {
old := globalVerbosity
globalVerbosity = v
return old
}
// New returns a logr.Logger which is implemented by Go's standard log package,
// or something like it. If std is nil, this will use a default logger
// instead.
//
// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
func New(std StdLogger) logr.Logger {
return NewWithOptions(std, Options{})
}
// NewWithOptions returns a logr.Logger which is implemented by Go's standard
// log package, or something like it. See New for details.
func NewWithOptions(std StdLogger, opts Options) logr.Logger {
if std == nil {
// Go's log.Default() is only available in 1.16 and higher.
std = log.New(os.Stderr, "", log.LstdFlags)
}
if opts.Depth < 0 {
opts.Depth = 0
}
fopts := funcr.Options{
LogCaller: funcr.MessageClass(opts.LogCaller),
}
sl := &logger{
Formatter: funcr.NewFormatter(fopts),
std: std,
}
// For skipping our own logger.Info/Error.
sl.Formatter.AddCallDepth(1 + opts.Depth)
return logr.New(sl)
}
// Options carries parameters which influence the way logs are generated.
type Options struct {
// Depth biases the assumed number of call frames to the "true" caller.
// This is useful when the calling code calls a function which then calls
// stdr (e.g. a logging shim to another API). Values less than zero will
// be treated as zero.
Depth int
// LogCaller tells stdr to add a "caller" key to some or all log lines.
// Go's log package has options to log this natively, too.
LogCaller MessageClass
// TODO: add an option to log the date/time
}
// MessageClass indicates which category or categories of messages to consider.
type MessageClass int
const (
// None ignores all message classes.
None MessageClass = iota
// All considers all message classes.
All
// Info only considers info messages.
Info
// Error only considers error messages.
Error
)
// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
// this adapter.
type StdLogger interface {
// Output is the same as log.Output and log.Logger.Output.
Output(calldepth int, logline string) error
}
type logger struct {
funcr.Formatter
std StdLogger
}
var _ logr.LogSink = &logger{}
var _ logr.CallDepthLogSink = &logger{}
func (l logger) Enabled(level int) bool {
return globalVerbosity >= level
}
func (l logger) Info(level int, msg string, kvList ...interface{}) {
prefix, args := l.FormatInfo(level, msg, kvList)
if prefix != "" {
args = prefix + ": " + args
}
_ = l.std.Output(l.Formatter.GetDepth()+1, args)
}
func (l logger) Error(err error, msg string, kvList ...interface{}) {
prefix, args := l.FormatError(err, msg, kvList)
if prefix != "" {
args = prefix + ": " + args
}
_ = l.std.Output(l.Formatter.GetDepth()+1, args)
}
func (l logger) WithName(name string) logr.LogSink {
l.Formatter.AddName(name)
return &l
}
func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
l.Formatter.AddValues(kvList)
return &l
}
func (l logger) WithCallDepth(depth int) logr.LogSink {
l.Formatter.AddCallDepth(depth)
return &l
}
// Underlier exposes access to the underlying logging implementation. Since
// callers only have a logr.Logger, they have to know which implementation is
in use, so this interface is less of an abstraction and more of a way to test
// type conversion.
type Underlier interface {
GetUnderlying() StdLogger
}
// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger
// is itself an interface, the result may or may not be a Go log.Logger.
func (l logger) GetUnderlying() StdLogger {
return l.std
}
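
For reference, the adapter above can be used as follows; a minimal sketch in which the logger name, keys, and values are illustrative rather than part of the vendored code:

package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
)

func main() {
	// Route logr calls through a standard library logger, as shown above.
	logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
	stdr.SetVerbosity(1) // global and not concurrent-safe; set once at startup

	logger = logger.WithName("example").WithValues("component", "demo")
	logger.Info("starting up", "port", 8080)         // level 0, always enabled
	logger.V(1).Info("verbose detail")               // enabled: global verbosity >= 1
	logger.V(2).Info("dropped")                      // suppressed: 2 > global verbosity
	logger.Error(os.ErrNotExist, "config not found") // errors bypass the verbosity check
}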

View File

@ -2,7 +2,6 @@ package client
import (
"io"
"io/ioutil"
"net/http"
"sync/atomic"
)
@ -50,7 +49,7 @@ func (d *drainingReadCloser) Close() error {
// some bytes, but the closer ignores them to keep the underlying
// connection open.
//nolint:errcheck
io.Copy(ioutil.Discard, d.rdr)
io.Copy(io.Discard, d.rdr)
}
return d.rdr.Close()
}

View File

@ -0,0 +1,207 @@
package client
import (
"fmt"
"net/http"
"strings"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/propagation"
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
"go.opentelemetry.io/otel/trace"
)
const (
instrumentationVersion = "1.0.0"
tracerName = "go-openapi"
)
type config struct {
Tracer trace.Tracer
Propagator propagation.TextMapPropagator
SpanStartOptions []trace.SpanStartOption
SpanNameFormatter func(*runtime.ClientOperation) string
TracerProvider trace.TracerProvider
}
type OpenTelemetryOpt interface {
apply(*config)
}
type optionFunc func(*config)
func (o optionFunc) apply(c *config) {
o(c)
}
// WithTracerProvider specifies a tracer provider to use for creating a tracer.
// If none is specified, the global provider is used.
func WithTracerProvider(provider trace.TracerProvider) OpenTelemetryOpt {
return optionFunc(func(c *config) {
if provider != nil {
c.TracerProvider = provider
}
})
}
// WithPropagators configures specific propagators. If this
// option isn't specified, then the global TextMapPropagator is used.
func WithPropagators(ps propagation.TextMapPropagator) OpenTelemetryOpt {
return optionFunc(func(c *config) {
if ps != nil {
c.Propagator = ps
}
})
}
// WithSpanOptions configures an additional set of
// trace.SpanOptions, which are applied to each new span.
func WithSpanOptions(opts ...trace.SpanStartOption) OpenTelemetryOpt {
return optionFunc(func(c *config) {
c.SpanStartOptions = append(c.SpanStartOptions, opts...)
})
}
// WithSpanNameFormatter takes a function that will be called on every
// request and the returned string will become the Span Name.
func WithSpanNameFormatter(f func(op *runtime.ClientOperation) string) OpenTelemetryOpt {
return optionFunc(func(c *config) {
c.SpanNameFormatter = f
})
}
func defaultTransportFormatter(op *runtime.ClientOperation) string {
if op.ID != "" {
return op.ID
}
return fmt.Sprintf("%s_%s", strings.ToLower(op.Method), op.PathPattern)
}
type openTelemetryTransport struct {
transport runtime.ClientTransport
host string
tracer trace.Tracer
config *config
}
func newOpenTelemetryTransport(transport runtime.ClientTransport, host string, opts []OpenTelemetryOpt) *openTelemetryTransport {
tr := &openTelemetryTransport{
transport: transport,
host: host,
}
defaultOpts := []OpenTelemetryOpt{
WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)),
WithSpanNameFormatter(defaultTransportFormatter),
WithPropagators(otel.GetTextMapPropagator()),
WithTracerProvider(otel.GetTracerProvider()),
}
c := newConfig(append(defaultOpts, opts...)...)
tr.config = c
return tr
}
func (t *openTelemetryTransport) Submit(op *runtime.ClientOperation) (interface{}, error) {
if op.Context == nil {
return t.transport.Submit(op)
}
params := op.Params
reader := op.Reader
var span trace.Span
defer func() {
if span != nil {
span.End()
}
}()
op.Params = runtime.ClientRequestWriterFunc(func(req runtime.ClientRequest, reg strfmt.Registry) error {
span = t.newOpenTelemetrySpan(op, req.GetHeaderParams())
return params.WriteToRequest(req, reg)
})
op.Reader = runtime.ClientResponseReaderFunc(func(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
if span != nil {
statusCode := response.Code()
span.SetAttributes(attribute.Int(string(semconv.HTTPStatusCodeKey), statusCode))
span.SetStatus(semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(statusCode, trace.SpanKindClient))
}
return reader.ReadResponse(response, consumer)
})
submit, err := t.transport.Submit(op)
if err != nil && span != nil {
span.RecordError(err)
span.SetStatus(codes.Error, err.Error())
}
return submit, err
}
func (t *openTelemetryTransport) newOpenTelemetrySpan(op *runtime.ClientOperation, header http.Header) trace.Span {
ctx := op.Context
tracer := t.tracer
if tracer == nil {
if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() {
tracer = newTracer(span.TracerProvider())
} else {
tracer = newTracer(otel.GetTracerProvider())
}
}
ctx, span := tracer.Start(ctx, t.config.SpanNameFormatter(op), t.config.SpanStartOptions...)
var scheme string
if len(op.Schemes) > 0 {
scheme = op.Schemes[0]
}
span.SetAttributes(
attribute.String("net.peer.name", t.host),
attribute.String(string(semconv.HTTPRouteKey), op.PathPattern),
attribute.String(string(semconv.HTTPMethodKey), op.Method),
attribute.String("span.kind", trace.SpanKindClient.String()),
attribute.String("http.scheme", scheme),
)
carrier := propagation.HeaderCarrier(header)
t.config.Propagator.Inject(ctx, carrier)
return span
}
func newTracer(tp trace.TracerProvider) trace.Tracer {
return tp.Tracer(tracerName, trace.WithInstrumentationVersion(version()))
}
func newConfig(opts ...OpenTelemetryOpt) *config {
c := &config{
Propagator: otel.GetTextMapPropagator(),
}
for _, opt := range opts {
opt.apply(c)
}
// Tracer is only initialized if manually specified. Otherwise, can be passed with the tracing context.
if c.TracerProvider != nil {
c.Tracer = newTracer(c.TracerProvider)
}
return c
}
// version returns the current release version of the go-runtime instrumentation.
func version() string {
return instrumentationVersion
}
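
As a rough usage sketch of the new transport (the host, base path, and span naming below are placeholders, not part of the vendored change):

package main

import (
	"strings"

	"github.com/go-openapi/runtime"
	httptransport "github.com/go-openapi/runtime/client"
)

func main() {
	// Build a client runtime, then wrap it in the OpenTelemetry transport defined above.
	rt := httptransport.New("api.example.com", "/v1", []string{"https"})
	tr := rt.WithOpenTelemetry(
		// Optional: override the default operation-ID / method_path span names.
		httptransport.WithSpanNameFormatter(func(op *runtime.ClientOperation) string {
			return "petstore." + strings.ToLower(op.Method) + op.PathPattern
		}),
	)
	_ = tr // hand tr to a generated go-swagger client as its ClientTransport
}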

View File

@ -23,6 +23,8 @@ import (
var _ runtime.ClientResponse = response{}
func newResponse(resp *http.Response) runtime.ClientResponse { return response{resp: resp} }
type response struct {
resp *http.Response
}

View File

@ -23,21 +23,21 @@ import (
"crypto/x509"
"encoding/pem"
"fmt"
"io/ioutil"
"mime"
"net/http"
"net/http/httputil"
"os"
"strings"
"sync"
"time"
"github.com/go-openapi/strfmt"
"github.com/opentracing/opentracing-go"
"github.com/go-openapi/runtime"
"github.com/go-openapi/runtime/logger"
"github.com/go-openapi/runtime/middleware"
"github.com/go-openapi/runtime/yamlpc"
"github.com/go-openapi/strfmt"
)
// TLSClientOptions to configure client authentication with mutual TLS
@ -164,7 +164,7 @@ func TLSClientAuth(opts TLSClientOptions) (*tls.Config, error) {
cfg.RootCAs = caCertPool
} else if opts.CA != "" {
// load ca cert
caCert, err := ioutil.ReadFile(opts.CA)
caCert, err := os.ReadFile(opts.CA)
if err != nil {
return nil, fmt.Errorf("tls client ca: %v", err)
}
@ -181,8 +181,6 @@ func TLSClientAuth(opts TLSClientOptions) (*tls.Config, error) {
cfg.ServerName = opts.ServerName
}
cfg.BuildNameToCertificate()
return cfg, nil
}
@ -225,7 +223,7 @@ type Runtime struct {
Transport http.RoundTripper
Jar http.CookieJar
//Spec *spec.Document
// Spec *spec.Document
Host string
BasePath string
Formats strfmt.Registry
@ -237,6 +235,7 @@ type Runtime struct {
clientOnce *sync.Once
client *http.Client
schemes []string
response ClientResponseFunc
}
// New creates a new default runtime for a swagger api runtime.Client
@ -275,6 +274,7 @@ func New(host, basePath string, schemes []string) *Runtime {
rt.Debug = logger.DebugEnabled()
rt.logger = logger.StandardLogger{}
rt.response = newResponse
if len(schemes) > 0 {
rt.schemes = schemes
@ -301,6 +301,14 @@ func (r *Runtime) WithOpenTracing(opts ...opentracing.StartSpanOption) runtime.C
return newOpenTracingTransport(r, r.Host, opts)
}
// WithOpenTelemetry adds opentelemetry support to the provided runtime.
// A new client span is created for each request.
// If the context of the client operation does not contain an active span, no span is created.
// The provided opts are applied to each span, for example to add global tags.
func (r *Runtime) WithOpenTelemetry(opts ...OpenTelemetryOpt) runtime.ClientTransport {
return newOpenTelemetryTransport(r, r.Host, opts)
}
func (r *Runtime) pickScheme(schemes []string) string {
if v := r.selectScheme(r.schemes); v != "" {
return v
@ -329,6 +337,7 @@ func (r *Runtime) selectScheme(schemes []string) string {
}
return scheme
}
func transportOrDefault(left, right http.RoundTripper) http.RoundTripper {
if left == nil {
return right
@ -381,7 +390,7 @@ func (r *Runtime) createHttpRequest(operation *runtime.ClientOperation) (*reques
return r.DefaultAuthentication.AuthenticateRequest(req, reg)
})
}
//if auth != nil {
// if auth != nil {
// if err := auth.AuthenticateRequest(request, r.Formats); err != nil {
// return nil, err
// }
@ -500,7 +509,7 @@ func (r *Runtime) Submit(operation *runtime.ClientOperation) (interface{}, error
return nil, fmt.Errorf("no consumer: %q", ct)
}
}
return readResponse.ReadResponse(response{res}, cons)
return readResponse.ReadResponse(r.response(res), cons)
}
// SetDebug changes the debug flag.
@ -516,3 +525,13 @@ func (r *Runtime) SetLogger(logger logger.Logger) {
r.logger = logger
middleware.Logger = logger
}
type ClientResponseFunc = func(*http.Response) runtime.ClientResponse
// SetResponseReader changes the response reader implementation.
func (r *Runtime) SetResponseReader(f ClientResponseFunc) {
if f == nil {
return
}
r.response = f
}
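
The new SetResponseReader hook makes the response wrapper pluggable. Below is a hedged sketch of a custom reader that logs status codes before delegating to the consumers; the loggedResponse type is hypothetical, and its method set assumes the runtime.ClientResponse interface of this release (Code, Message, GetHeader, GetHeaders, Body):

package main

import (
	"io"
	"log"
	"net/http"

	"github.com/go-openapi/runtime"
	httptransport "github.com/go-openapi/runtime/client"
)

// loggedResponse is a minimal runtime.ClientResponse over *http.Response.
type loggedResponse struct{ resp *http.Response }

func (r loggedResponse) Code() int                       { return r.resp.StatusCode }
func (r loggedResponse) Message() string                 { return r.resp.Status }
func (r loggedResponse) GetHeader(name string) string    { return r.resp.Header.Get(name) }
func (r loggedResponse) GetHeaders(name string) []string { return r.resp.Header.Values(name) }
func (r loggedResponse) Body() io.ReadCloser             { return r.resp.Body }

func main() {
	rt := httptransport.New("api.example.com", "/v1", []string{"https"})
	rt.SetResponseReader(func(resp *http.Response) runtime.ClientResponse {
		log.Printf("HTTP %d", resp.StatusCode) // observe every response before decoding
		return loggedResponse{resp: resp}
	})
}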

View File

@ -16,7 +16,6 @@ package runtime
import (
"io"
"io/ioutil"
"net/http"
"net/url"
"time"
@ -79,7 +78,7 @@ type NamedReadCloser interface {
func NamedReader(name string, rdr io.Reader) NamedReadCloser {
rc, ok := rdr.(io.ReadCloser)
if !ok {
rc = ioutil.NopCloser(rdr)
rc = io.NopCloser(rdr)
}
return &namedReadCloser{
name: name,

View File

@ -195,6 +195,17 @@ func NewRoutableContext(spec *loads.Document, routableAPI RoutableAPI, routes Ro
if spec != nil {
an = analysis.New(spec.Spec())
}
return NewRoutableContextWithAnalyzedSpec(spec, an, routableAPI, routes)
}
// NewRoutableContextWithAnalyzedSpec is like NewRoutableContext, but additionally takes the pre-analyzed spec as input
func NewRoutableContextWithAnalyzedSpec(spec *loads.Document, an *analysis.Spec, routableAPI RoutableAPI, routes Router) *Context {
// Either both the spec document and its analysis are provided, or neither of them is.
if !((spec == nil && an == nil) || (spec != nil && an != nil)) {
panic(errors.New(http.StatusInternalServerError, "routable context requires either both spec doc and analysis, or none of them"))
}
ctx := &Context{spec: spec, api: routableAPI, analyzer: an, router: routes}
return ctx
}
@ -498,7 +509,9 @@ func (c *Context) Respond(rw http.ResponseWriter, r *http.Request, produces []st
if resp, ok := data.(Responder); ok {
producers := route.Producers
prod, ok := producers[format]
// producers contains keys with normalized formats: if a format has a MIME type parameter such as `text/plain; charset=utf-8`,
// then `text/plain` must be provided to get the correct producer. HOWEVER, format here is not normalized.
prod, ok := producers[normalizeOffer(format)]
if !ok {
prods := c.api.ProducersFor(normalizeOffers([]string{c.api.DefaultProduces()}))
pr, ok := prods[c.api.DefaultProduces()]

View File

@ -206,7 +206,11 @@ func (p *untypedParamBinder) Bind(request *http.Request, routeParams RouteParams
if p.parameter.Type == "file" {
file, header, ffErr := request.FormFile(p.parameter.Name)
if ffErr != nil {
return errors.NewParseError(p.Name, p.parameter.In, "", ffErr)
if p.parameter.Required {
return errors.NewParseError(p.Name, p.parameter.In, "", ffErr)
} else {
return nil
}
}
target.Set(reflect.ValueOf(runtime.File{Data: file, Header: header}))
return nil

View File

@ -16,6 +16,8 @@ type SwaggerUIOpts struct {
Path string
// SpecURL the url to find the spec for
SpecURL string
// OAuthCallbackURL the url called after OAuth2 login
OAuthCallbackURL string
// The three components needed to embed swagger-ui
SwaggerURL string
@ -40,6 +42,9 @@ func (r *SwaggerUIOpts) EnsureDefaults() {
if r.SpecURL == "" {
r.SpecURL = "/swagger.json"
}
if r.OAuthCallbackURL == "" {
r.OAuthCallbackURL = path.Join(r.BasePath, r.Path, "oauth2-callback")
}
if r.SwaggerURL == "" {
r.SwaggerURL = swaggerLatest
}
@ -149,7 +154,8 @@ const (
plugins: [
SwaggerUIBundle.plugins.DownloadUrl
],
layout: "StandaloneLayout"
layout: "StandaloneLayout",
oauth2RedirectUrl: '{{ .OAuthCallbackURL }}'
})
// End Swagger UI call region

View File

@ -0,0 +1,122 @@
package middleware
import (
"bytes"
"fmt"
"net/http"
"path"
"text/template"
)
func SwaggerUIOAuth2Callback(opts SwaggerUIOpts, next http.Handler) http.Handler {
opts.EnsureDefaults()
pth := opts.OAuthCallbackURL
tmpl := template.Must(template.New("swaggeroauth").Parse(swaggerOAuthTemplate))
buf := bytes.NewBuffer(nil)
_ = tmpl.Execute(buf, &opts)
b := buf.Bytes()
return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
if path.Join(r.URL.Path) == pth {
rw.Header().Set("Content-Type", "text/html; charset=utf-8")
rw.WriteHeader(http.StatusOK)
_, _ = rw.Write(b)
return
}
if next == nil {
rw.Header().Set("Content-Type", "text/plain")
rw.WriteHeader(http.StatusNotFound)
_, _ = rw.Write([]byte(fmt.Sprintf("%q not found", pth)))
return
}
next.ServeHTTP(rw, r)
})
}
const (
swaggerOAuthTemplate = `
<!DOCTYPE html>
<html lang="en">
<head>
<title>{{ .Title }}</title>
</head>
<body>
<script>
'use strict';
function run () {
var oauth2 = window.opener.swaggerUIRedirectOauth2;
var sentState = oauth2.state;
var redirectUrl = oauth2.redirectUrl;
var isValid, qp, arr;
if (/code|token|error/.test(window.location.hash)) {
qp = window.location.hash.substring(1).replace('?', '&');
} else {
qp = location.search.substring(1);
}
arr = qp.split("&");
arr.forEach(function (v,i,_arr) { _arr[i] = '"' + v.replace('=', '":"') + '"';});
qp = qp ? JSON.parse('{' + arr.join() + '}',
function (key, value) {
return key === "" ? value : decodeURIComponent(value);
}
) : {};
isValid = qp.state === sentState;
if ((
oauth2.auth.schema.get("flow") === "accessCode" ||
oauth2.auth.schema.get("flow") === "authorizationCode" ||
oauth2.auth.schema.get("flow") === "authorization_code"
) && !oauth2.auth.code) {
if (!isValid) {
oauth2.errCb({
authId: oauth2.auth.name,
source: "auth",
level: "warning",
message: "Authorization may be unsafe, passed state was changed in server. The passed state wasn't returned from auth server."
});
}
if (qp.code) {
delete oauth2.state;
oauth2.auth.code = qp.code;
oauth2.callback({auth: oauth2.auth, redirectUrl: redirectUrl});
} else {
let oauthErrorMsg;
if (qp.error) {
oauthErrorMsg = "["+qp.error+"]: " +
(qp.error_description ? qp.error_description+ ". " : "no accessCode received from the server. ") +
(qp.error_uri ? "More info: "+qp.error_uri : "");
}
oauth2.errCb({
authId: oauth2.auth.name,
source: "auth",
level: "error",
message: oauthErrorMsg || "[Authorization failed]: no accessCode received from the server."
});
}
} else {
oauth2.callback({auth: oauth2.auth, token: qp, isValid: isValid, redirectUrl: redirectUrl});
}
window.close();
}
if (document.readyState !== 'loading') {
run();
} else {
document.addEventListener('DOMContentLoaded', function () {
run();
});
}
</script>
</body>
</html>
`
)
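
Wiring the new callback handler next to the existing SwaggerUI middleware might look like the following sketch, in which the API handler and listen address are placeholders:

package main

import (
	"net/http"

	"github.com/go-openapi/runtime/middleware"
)

func main() {
	opts := middleware.SwaggerUIOpts{Path: "docs"}

	// Innermost: the actual API (placeholder here). Then the UI at /docs,
	// and the new OAuth2 redirect page at /docs/oauth2-callback.
	var h http.Handler = http.NotFoundHandler()
	h = middleware.SwaggerUI(opts, h)
	h = middleware.SwaggerUIOAuth2Callback(opts, h)

	_ = http.ListenAndServe(":8080", h)
}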

View File

@ -14,31 +14,40 @@ linters-settings:
min-occurrences: 4
linters:
enable-all: true
disable:
- maligned
- lll
- gochecknoinits
- gochecknoglobals
- godox
- gocognit
- whitespace
- wsl
- funlen
- wrapcheck
- testpackage
- nlreturn
- gofumpt
- goerr113
- gci
- gomnd
- godot
- exhaustivestruct
- paralleltest
- varnamelen
- ireturn
- exhaustruct
#- thelper
enable:
- revive
- goimports
- gosec
- unparam
- unconvert
- predeclared
- prealloc
- misspell
# disable:
# - maligned
# - lll
# - gochecknoinits
# - gochecknoglobals
# - godox
# - gocognit
# - whitespace
# - wsl
# - funlen
# - wrapcheck
# - testpackage
# - nlreturn
# - gofumpt
# - goerr113
# - gci
# - gomnd
# - godot
# - exhaustivestruct
# - paralleltest
# - varnamelen
# - ireturn
# - exhaustruct
# #- thelper
issues:
exclude-rules:

View File

@ -142,7 +142,7 @@ func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) {
// BSON value representation of themselves. The BSON bytes and type can be
// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
// wishes to retain the data after returning.
func (id *ObjectId) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error {
func (id *ObjectId) UnmarshalBSONValue(_ bsontype.Type, data []byte) error {
var oid bsonprim.ObjectID
copy(oid[:], data)
*id = ObjectId(oid)

View File

@ -57,7 +57,7 @@ func (d *Date) UnmarshalText(text []byte) error {
if len(text) == 0 {
return nil
}
dd, err := time.Parse(RFC3339FullDate, string(text))
dd, err := time.ParseInLocation(RFC3339FullDate, string(text), DefaultTimeLocation)
if err != nil {
return err
}
@ -107,7 +107,7 @@ func (d *Date) UnmarshalJSON(data []byte) error {
if err := json.Unmarshal(data, &strdate); err != nil {
return err
}
tt, err := time.Parse(RFC3339FullDate, strdate)
tt, err := time.ParseInLocation(RFC3339FullDate, strdate, DefaultTimeLocation)
if err != nil {
return err
}
@ -126,7 +126,7 @@ func (d *Date) UnmarshalBSON(data []byte) error {
}
if data, ok := m["data"].(string); ok {
rd, err := time.Parse(RFC3339FullDate, data)
rd, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation)
if err != nil {
return err
}

View File

@ -109,7 +109,7 @@ func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc { //
if to == tpe {
switch v.Name {
case "date":
d, err := time.Parse(RFC3339FullDate, data)
d, err := time.ParseInLocation(RFC3339FullDate, data, DefaultTimeLocation)
if err != nil {
return nil, err
}

View File

@ -29,6 +29,12 @@ import (
"go.mongodb.org/mongo-driver/bson/bsontype"
)
var (
// UnixZero sets the zero unix timestamp we want to compare against.
// Unix time 0 in an EST timezone is not equivalent to Unix time 0 in UTC.
UnixZero = time.Unix(0, 0).UTC()
)
func init() {
dt := DateTime{}
Default.Add("datetime", &dt, IsDateTime)
@ -86,6 +92,9 @@ var (
// NormalizeTimeForMarshal provides a normalization function on time before marshalling (e.g. time.UTC).
// By default, the time value is not changed.
NormalizeTimeForMarshal = func(t time.Time) time.Time { return t }
// DefaultTimeLocation provides a location for a time when the time zone is not encoded in the string (e.g. ISO 8601 local time variants).
DefaultTimeLocation = time.UTC
)
// ParseDateTime parses a string that represents an ISO8601 time or a unix epoch
@ -95,7 +104,7 @@ func ParseDateTime(data string) (DateTime, error) {
}
var lastError error
for _, layout := range DateTimeFormats {
dd, err := time.Parse(layout, data)
dd, err := time.ParseInLocation(layout, data, DefaultTimeLocation)
if err != nil {
lastError = err
continue
@ -123,6 +132,22 @@ func (t DateTime) String() string {
return NormalizeTimeForMarshal(time.Time(t)).Format(MarshalFormat)
}
// IsZero returns whether the date time is a zero value
func (t *DateTime) IsZero() bool {
if t == nil {
return true
}
return time.Time(*t).IsZero()
}
// IsUnixZero returns whether the date time is equivalent to time.Unix(0, 0).UTC().
func (t *DateTime) IsUnixZero() bool {
if t == nil {
return true
}
return time.Time(*t).Equal(UnixZero)
}
// MarshalText implements the text marshaller interface
func (t DateTime) MarshalText() ([]byte, error) {
return []byte(t.String()), nil
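
The effect of DefaultTimeLocation and the new zero-value helpers, as a small sketch (it assumes the ISO 8601 local-time layout remains in DateTimeFormats):

package main

import (
	"fmt"
	"time"

	"github.com/go-openapi/strfmt"
)

func main() {
	// Zone-less timestamps now parse in DefaultTimeLocation rather than
	// implicitly in UTC; UTC stays the default, shown here explicitly.
	strfmt.DefaultTimeLocation = time.UTC

	dt, err := strfmt.ParseDateTime("2023-04-01T12:19:48") // no zone in the string
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Time(dt).Location()) // UTC

	var zero strfmt.DateTime
	unixZero := strfmt.DateTime(strfmt.UnixZero)
	fmt.Println(zero.IsZero(), zero.IsUnixZero())         // true false
	fmt.Println(unixZero.IsZero(), unixZero.IsUnixZero()) // false true
}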

View File

@ -15,9 +15,12 @@ import (
// ULID represents a ulid string format
// ref:
// https://github.com/ulid/spec
//
// https://github.com/ulid/spec
//
// impl:
// https://github.com/oklog/ulid
//
// https://github.com/oklog/ulid
//
// swagger:strfmt ulid
type ULID struct {
@ -89,7 +92,9 @@ func NewULIDZero() ULID {
}
// NewULID generates new unique ULID value and a error if any
func NewULID() (u ULID, err error) {
func NewULID() (ULID, error) {
var u ULID
obj := ulidEntropyPool.Get()
entropy, ok := obj.(io.Reader)
if !ok {

View File

@ -248,7 +248,7 @@ func MinimumUint(path, in string, data, min uint64, exclusive bool) *errors.Vali
// MultipleOf validates if the provided number is a multiple of the factor
func MultipleOf(path, in string, data, factor float64) *errors.Validation {
// multipleOf factor must be positive
if factor < 0 {
if factor <= 0 {
return errors.MultipleOfMustBePositive(path, in, factor)
}
var mult float64
@ -266,7 +266,7 @@ func MultipleOf(path, in string, data, factor float64) *errors.Validation {
// MultipleOfInt validates if the provided integer is a multiple of the factor
func MultipleOfInt(path, in string, data int64, factor int64) *errors.Validation {
// multipleOf factor must be positive
if factor < 0 {
if factor <= 0 {
return errors.MultipleOfMustBePositive(path, in, factor)
}
mult := data / factor
@ -278,6 +278,10 @@ func MultipleOfInt(path, in string, data int64, factor int64) *errors.Validation
// MultipleOfUint validates if the provided unsigned integer is a multiple of the factor
func MultipleOfUint(path, in string, data, factor uint64) *errors.Validation {
// multipleOf factor must be positive
if factor == 0 {
return errors.MultipleOfMustBePositive(path, in, factor)
}
mult := data / factor
if mult*factor != data {
return errors.NotMultipleOf(path, in, factor, data)
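
The tightened factor checks above can be exercised directly through the exported helpers; a quick sketch in which the path and parameter names are arbitrary:

package main

import (
	"fmt"

	"github.com/go-openapi/validate"
)

func main() {
	// A factor of 0 is now rejected up front: previously only factor < 0 failed,
	// and a zero factor would have divided by zero in the unsigned case.
	fmt.Println(validate.MultipleOf("rate", "query", 10, 2.5) == nil) // true: 10 = 4 x 2.5
	fmt.Println(validate.MultipleOf("rate", "query", 10, 0) != nil)   // true: rejected
	fmt.Println(validate.MultipleOfInt("n", "query", 9, 3) == nil)    // true
	fmt.Println(validate.MultipleOfUint("n", "query", 9, 0) != nil)   // true: rejected
}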

View File

@ -1,10 +1,8 @@
## locales
<img align="right" src="https://raw.githubusercontent.com/go-playground/locales/master/logo.png">![Project status](https://img.shields.io/badge/version-0.14.0-green.svg)
<img align="right" src="https://raw.githubusercontent.com/go-playground/locales/master/logo.png">![Project status](https://img.shields.io/badge/version-0.14.1-green.svg)
[![Build Status](https://travis-ci.org/go-playground/locales.svg?branch=master)](https://travis-ci.org/go-playground/locales)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/locales)](https://goreportcard.com/report/github.com/go-playground/locales)
[![GoDoc](https://godoc.org/github.com/go-playground/locales?status.svg)](https://godoc.org/github.com/go-playground/locales)
![License](https://img.shields.io/dub/l/vibe-d.svg)
[![Gitter](https://badges.gitter.im/go-playground/locales.svg)](https://gitter.im/go-playground/locales?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
Locales is a set of locales generated from the [Unicode CLDR Project](http://cldr.unicode.org/) which can be used independently or within
an i18n package; these were built for use with, but not exclusive to, [Universal Translator](https://github.com/go-playground/universal-translator).

View File

@ -1,11 +1,9 @@
## universal-translator
<img align="right" src="https://raw.githubusercontent.com/go-playground/universal-translator/master/logo.png">![Project status](https://img.shields.io/badge/version-0.18.0-green.svg)
[![Build Status](https://travis-ci.org/go-playground/universal-translator.svg?branch=master)](https://travis-ci.org/go-playground/universal-translator)
<img align="right" src="https://raw.githubusercontent.com/go-playground/universal-translator/master/logo.png">![Project status](https://img.shields.io/badge/version-0.18.1-green.svg)
[![Coverage Status](https://coveralls.io/repos/github/go-playground/universal-translator/badge.svg)](https://coveralls.io/github/go-playground/universal-translator)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/universal-translator)](https://goreportcard.com/report/github.com/go-playground/universal-translator)
[![GoDoc](https://godoc.org/github.com/go-playground/universal-translator?status.svg)](https://godoc.org/github.com/go-playground/universal-translator)
![License](https://img.shields.io/dub/l/vibe-d.svg)
[![Gitter](https://badges.gitter.im/go-playground/universal-translator.svg)](https://gitter.im/go-playground/universal-translator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
Universal Translator is an i18n Translator for Go/Golang using CLDR data + pluralization rules

View File

@ -3,7 +3,6 @@ package ut
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
@ -41,7 +40,6 @@ const (
func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string) error {
_, err := os.Stat(dirname)
fmt.Println(dirname, err, os.IsNotExist(err))
if err != nil {
if !os.IsNotExist(err) {
@ -138,7 +136,7 @@ func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string)
return err
}
err = ioutil.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644)
err = os.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644)
if err != nil {
return err
}
@ -200,7 +198,7 @@ func (t *UniversalTranslator) Import(format ImportExportFormat, dirnameOrFilenam
// NOTE: generally used when assets have been embedded into the binary and are already in memory.
func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader io.Reader) error {
b, err := ioutil.ReadAll(reader)
b, err := io.ReadAll(reader)
if err != nil {
return err
}

View File

@ -28,3 +28,4 @@ _testmain.go
*.txt
cover.html
README.html
.idea

View File

@ -1,7 +1,7 @@
Package validator
=================
<img align="right" src="https://raw.githubusercontent.com/go-playground/validator/v9/logo.png">[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
![Project status](https://img.shields.io/badge/version-10.11.1-green.svg)
<img align="right" src="https://raw.githubusercontent.com/go-playground/validator/v10/logo.png">[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
![Project status](https://img.shields.io/badge/version-10.12.0-green.svg)
[![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator)
[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator)
@ -73,8 +73,8 @@ Baked-in Validations
| - | - |
| eqcsfield | Field Equals Another Field (relative)|
| eqfield | Field Equals Another Field |
| fieldcontains | NOT DOCUMENTED IN doc.go |
| fieldexcludes | NOT DOCUMENTED IN doc.go |
| fieldcontains | Check the indicated characters are present in the Field |
| fieldexcludes | Check the indicated characters are not present in the field |
| gtcsfield | Field Greater Than Another Relative Field |
| gtecsfield | Field Greater Than or Equal To Another Relative Field |
| gtefield | Field Greater Than or Equal To Another Field |
@ -114,6 +114,7 @@ Baked-in Validations
| unix_addr | Unix domain socket end point Address |
| uri | URI String |
| url | URL String |
| http_url | HTTP URL String |
| url_encoded | URL Encoded |
| urn_rfc2141 | Urn RFC 2141 String |
@ -137,7 +138,7 @@ Baked-in Validations
| excludesrune | Excludes Rune |
| lowercase | Lowercase |
| multibyte | Multi-Byte Characters |
| number | NOT DOCUMENTED IN doc.go |
| number | Number |
| numeric | Numeric |
| printascii | Printable ASCII |
| startsnotwith | Starts Not With |
@ -149,11 +150,14 @@ Baked-in Validations
| - | - |
| base64 | Base64 String |
| base64url | Base64URL String |
| base64rawurl | Base64RawURL String |
| bic | Business Identifier Code (ISO 9362) |
| bcp47_language_tag | Language tag (BCP 47) |
| btc_addr | Bitcoin Address |
| btc_addr_bech32 | Bitcoin Bech32 Address (segwit) |
| credit_card | Credit Card Number |
| mongodb | MongoDB ObjectID |
| cron | Cron |
| datetime | Datetime |
| e164 | e164 formatted phone number |
| email | E-mail String |
@ -176,6 +180,7 @@ Baked-in Validations
| jwt | JSON Web Token (JWT) |
| latitude | Latitude |
| longitude | Longitude |
| luhn_checksum | Luhn Algorithm Checksum (for strings and (u)int) |
| postcode_iso3166_alpha2 | Postcode |
| postcode_iso3166_alpha2_field | Postcode |
| rgb | RGB String |
@ -202,22 +207,27 @@ Baked-in Validations
| tiger192 | TIGER192 hash |
| semver | Semantic Versioning 2.0.0 |
| ulid | Universally Unique Lexicographically Sortable Identifier ULID |
| cve | Common Vulnerabilities and Exposures Identifier (CVE id) |
### Comparisons:
| Tag | Description |
| - | - |
| eq | Equals |
| eq_ignore_case | Equals ignoring case |
| gt | Greater than|
| gte | Greater than or equal |
| lt | Less Than |
| lte | Less Than or Equal |
| ne | Not Equal |
| ne_ignore_case | Not Equal ignoring case |
### Other:
| Tag | Description |
| - | - |
| dir | Directory |
| file | File path |
| dir | Existing Directory |
| dirpath | Directory Path |
| file | Existing File |
| filepath | File Path |
| isdefault | Is Default |
| len | Length |
| max | Maximum |

View File

@ -7,6 +7,7 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
"io/fs"
"net"
"net/url"
"os"
@ -14,13 +15,14 @@ import (
"strconv"
"strings"
"sync"
"syscall"
"time"
"unicode/utf8"
"golang.org/x/crypto/sha3"
"golang.org/x/text/language"
urn "github.com/leodido/go-urn"
"github.com/leodido/go-urn"
)
// Func accepts a FieldLevel interface for all validation needs. The return
@ -86,7 +88,9 @@ var (
"min": hasMinOf,
"max": hasMaxOf,
"eq": isEq,
"eq_ignore_case": isEqIgnoreCase,
"ne": isNe,
"ne_ignore_case": isNeIgnoreCase,
"lt": isLt,
"lte": isLte,
"gt": isGt,
@ -121,11 +125,14 @@ var (
"e164": isE164,
"email": isEmail,
"url": isURL,
"http_url": isHttpURL,
"uri": isURI,
"urn_rfc2141": isUrnRFC2141, // RFC 2141
"file": isFile,
"filepath": isFilePath,
"base64": isBase64,
"base64url": isBase64URL,
"base64rawurl": isBase64RawURL,
"contains": contains,
"containsany": containsAny,
"containsrune": containsRune,
@ -140,6 +147,7 @@ var (
"isbn10": isISBN10,
"isbn13": isISBN13,
"eth_addr": isEthereumAddress,
"eth_addr_checksum": isEthereumAddressChecksum,
"btc_addr": isBitcoinAddress,
"btc_addr_bech32": isBitcoinBech32Address,
"uuid": isUUID,
@ -194,6 +202,7 @@ var (
"html_encoded": isHTMLEncoded,
"url_encoded": isURLEncoded,
"dir": isDir,
"dirpath": isDirPath,
"json": isJSON,
"jwt": isJWT,
"hostname_port": isHostnamePort,
@ -214,6 +223,10 @@ var (
"semver": isSemverFormat,
"dns_rfc1035_label": isDnsRFC1035LabelFormat,
"credit_card": isCreditCard,
"cve": isCveFormat,
"luhn_checksum": hasLuhnChecksum,
"mongodb": isMongoDB,
"cron": isCron,
}
)
@ -307,18 +320,42 @@ func isUnique(fl FieldLevel) bool {
}
m := reflect.MakeMap(reflect.MapOf(sfTyp, v.Type()))
var fieldlen int
for i := 0; i < field.Len(); i++ {
m.SetMapIndex(reflect.Indirect(reflect.Indirect(field.Index(i)).FieldByName(param)), v)
key := reflect.Indirect(reflect.Indirect(field.Index(i)).FieldByName(param))
if key.IsValid() {
fieldlen++
m.SetMapIndex(key, v)
}
}
return field.Len() == m.Len()
return fieldlen == m.Len()
case reflect.Map:
m := reflect.MakeMap(reflect.MapOf(field.Type().Elem(), v.Type()))
var m reflect.Value
if field.Type().Elem().Kind() == reflect.Ptr {
m = reflect.MakeMap(reflect.MapOf(field.Type().Elem().Elem(), v.Type()))
} else {
m = reflect.MakeMap(reflect.MapOf(field.Type().Elem(), v.Type()))
}
for _, k := range field.MapKeys() {
m.SetMapIndex(field.MapIndex(k), v)
m.SetMapIndex(reflect.Indirect(field.MapIndex(k)), v)
}
return field.Len() == m.Len()
default:
if parent := fl.Parent(); parent.Kind() == reflect.Struct {
uniqueField := parent.FieldByName(param)
if uniqueField == reflect.ValueOf(nil) {
panic(fmt.Sprintf("Bad field name provided %s", param))
}
if uniqueField.Kind() != field.Kind() {
panic(fmt.Sprintf("Bad field type %T:%T", field.Interface(), uniqueField.Interface()))
}
return field.Interface() != uniqueField.Interface()
}
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
}
@ -613,14 +650,16 @@ func isISBN10(fl FieldLevel) bool {
func isEthereumAddress(fl FieldLevel) bool {
address := fl.Field().String()
return ethAddressRegex.MatchString(address)
}
// isEthereumAddressChecksum is the validation function for validating if the field's value is a valid checksummed Ethereum address.
func isEthereumAddressChecksum(fl FieldLevel) bool {
address := fl.Field().String()
if !ethAddressRegex.MatchString(address) {
return false
}
if ethAddressRegexUpper.MatchString(address) || ethAddressRegexLower.MatchString(address) {
return true
}
// Checksum validation. Reference: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-55.md
address = address[2:] // Skip "0x" prefix.
h := sha3.NewLegacyKeccak256()
@ -889,6 +928,12 @@ func isNe(fl FieldLevel) bool {
return !isEq(fl)
}
// isNeIgnoreCase is the validation function for validating that the field's string value does not equal the
// provided param value. The comparison is case-insensitive
func isNeIgnoreCase(fl FieldLevel) bool {
return !isEqIgnoreCase(fl)
}
// isLteCrossStructField is the validation function for validating if the current field's value is less than or equal to the field, within a separate struct, specified by the param's value.
func isLteCrossStructField(fl FieldLevel) bool {
field := fl.Field()
@ -1260,6 +1305,22 @@ func isEq(fl FieldLevel) bool {
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
// isEqIgnoreCase is the validation function for validating if the current field's string value is
// equal to the param's value.
// The comparison is case-insensitive.
func isEqIgnoreCase(fl FieldLevel) bool {
field := fl.Field()
param := fl.Param()
switch field.Kind() {
case reflect.String:
return strings.EqualFold(field.String(), param)
}
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
// isPostcodeByIso3166Alpha2 validates by value which is country code in iso 3166 alpha 2
// example: `postcode_iso3166_alpha2=US`
func isPostcodeByIso3166Alpha2(fl FieldLevel) bool {
@ -1311,6 +1372,11 @@ func isBase64URL(fl FieldLevel) bool {
return base64URLRegex.MatchString(fl.Field().String())
}
// isBase64RawURL is the validation function for validating if the current field's value is a valid base64 URL safe string without '=' padding.
func isBase64RawURL(fl FieldLevel) bool {
return base64RawURLRegex.MatchString(fl.Field().String())
}
// isURI is the validation function for validating if the current field's value is a valid URI.
func isURI(fl FieldLevel) bool {
field := fl.Field()
@ -1370,6 +1436,23 @@ func isURL(fl FieldLevel) bool {
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
// isHttpURL is the validation function for validating if the current field's value is a valid HTTP(s) URL.
func isHttpURL(fl FieldLevel) bool {
if !isURL(fl) {
return false
}
field := fl.Field()
switch field.Kind() {
case reflect.String:
s := strings.ToLower(field.String())
return strings.HasPrefix(s, "http://") || strings.HasPrefix(s, "https://")
}
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
// isUrnRFC2141 is the validation function for validating if the current field's value is a valid URN as per RFC 2141.
func isUrnRFC2141(fl FieldLevel) bool {
field := fl.Field()
@ -1387,7 +1470,7 @@ func isUrnRFC2141(fl FieldLevel) bool {
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
// isFile is the validation function for validating if the current field's value is a valid file path.
// isFile is the validation function for validating if the current field's value is a valid existing file path.
func isFile(fl FieldLevel) bool {
field := fl.Field()
@ -1404,6 +1487,57 @@ func isFile(fl FieldLevel) bool {
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
// isFilePath is the validation function for validating if the current field's value is a valid file path.
func isFilePath(fl FieldLevel) bool {
var exists bool
var err error
field := fl.Field()
// If it exists, it obviously is valid.
// This is done first to avoid code duplication and unnecessary additional logic.
if exists = isFile(fl); exists {
return true
}
// It does not exist but may still be a valid filepath.
switch field.Kind() {
case reflect.String:
// Every OS allows for whitespace, but none
// let you use a file with no filename (to my knowledge).
// Unless you're dealing with raw inodes, but I digress.
if strings.TrimSpace(field.String()) == "" {
return false
}
// We make sure it isn't a directory.
if strings.HasSuffix(field.String(), string(os.PathSeparator)) {
return false
}
if _, err = os.Stat(field.String()); err != nil {
switch t := err.(type) {
case *fs.PathError:
if t.Err == syscall.EINVAL {
// It's definitely an invalid character in the filepath.
return false
}
// It could be a permission error, a does-not-exist error, etc.
// Out-of-scope for this validation, though.
return true
default:
// Something went *seriously* wrong.
/*
Per https://pkg.go.dev/os#Stat:
"If there is an error, it will be of type *PathError."
*/
panic(err)
}
}
}
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
// isE164 is the validation function for validating if the current field's value is a valid e.164 formatted phone number.
func isE164(fl FieldLevel) bool {
return e164Regex.MatchString(fl.Field().String())
@ -1539,7 +1673,9 @@ func requireCheckFieldKind(fl FieldLevel, param string, defaultNotFoundValue boo
}
// requireCheckFieldValue is a func for check field value
func requireCheckFieldValue(fl FieldLevel, param string, value string, defaultNotFoundValue bool) bool {
func requireCheckFieldValue(
fl FieldLevel, param string, value string, defaultNotFoundValue bool,
) bool {
field, kind, _, found := fl.GetStructFieldOKAdvanced2(fl.Parent(), param)
if !found {
return defaultNotFoundValue
@ -1623,10 +1759,10 @@ func excludedUnless(fl FieldLevel) bool {
}
for i := 0; i < len(params); i += 2 {
if !requireCheckFieldValue(fl, params[i], params[i+1], false) {
return true
return !hasValue(fl)
}
}
return !hasValue(fl)
return true
}
// excludedWith is the validation function
@ -2275,7 +2411,7 @@ func isFQDN(fl FieldLevel) bool {
return fqdnRegexRFC1123.MatchString(val)
}
// isDir is the validation function for validating if the current field's value is a valid directory.
// isDir is the validation function for validating if the current field's value is a valid existing directory.
func isDir(fl FieldLevel) bool {
field := fl.Field()
@ -2291,6 +2427,64 @@ func isDir(fl FieldLevel) bool {
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
// isDirPath is the validation function for validating if the current field's value is a valid directory path.
func isDirPath(fl FieldLevel) bool {
var exists bool
var err error
field := fl.Field()
// If it exists, it obviously is valid.
// This is done first to avoid code duplication and unnecessary additional logic.
if exists = isDir(fl); exists {
return true
}
// It does not exist but may still be a valid path.
switch field.Kind() {
case reflect.String:
// Every OS allows for whitespace, but none
// let you use a dir with no name (to my knowledge).
// Unless you're dealing with raw inodes, but I digress.
if strings.TrimSpace(field.String()) == "" {
return false
}
if _, err = os.Stat(field.String()); err != nil {
switch t := err.(type) {
case *fs.PathError:
if t.Err == syscall.EINVAL {
// It's definitely an invalid character in the path.
return false
}
// It could be a permission error, a does-not-exist error, etc.
// Out-of-scope for this validation, though.
// Lastly, we make sure it is a directory.
if strings.HasSuffix(field.String(), string(os.PathSeparator)) {
return true
} else {
return false
}
default:
// Something went *seriously* wrong.
/*
Per https://pkg.go.dev/os#Stat:
"If there is an error, it will be of type *PathError."
*/
panic(err)
}
}
// We repeat the check here to make sure it is an explicit directory in case the above os.Stat didn't trigger an error.
if strings.HasSuffix(field.String(), string(os.PathSeparator)) {
return true
} else {
return false
}
}
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
// isJSON is the validation function for validating if the current field's value is a valid json string.
func isJSON(fl FieldLevel) bool {
field := fl.Field()
@ -2316,7 +2510,9 @@ func isHostnamePort(fl FieldLevel) bool {
return false
}
// Port must be an integer <= 65535.
if portNum, err := strconv.ParseInt(port, 10, 32); err != nil || portNum > 65535 || portNum < 1 {
if portNum, err := strconv.ParseInt(
port, 10, 32,
); err != nil || portNum > 65535 || portNum < 1 {
return false
}
@ -2479,6 +2675,13 @@ func isSemverFormat(fl FieldLevel) bool {
return semverRegex.MatchString(semverString)
}
// isCveFormat is the validation function for validating if the current field's value is a valid CVE id, as defined by cve.mitre.org
func isCveFormat(fl FieldLevel) bool {
cveString := fl.Field().String()
return cveRegex.MatchString(cveString)
}
// isDnsRFC1035LabelFormat is the validation function
// for validating if the current field's value is
// a valid dns RFC 1035 label, defined in RFC 1035.
@ -2487,6 +2690,35 @@ func isDnsRFC1035LabelFormat(fl FieldLevel) bool {
return dnsRegexRFC1035Label.MatchString(val)
}
// digitsHaveLuhnChecksum returns true if and only if the last element of the given digits slice is the Luhn checksum of the previous elements
func digitsHaveLuhnChecksum(digits []string) bool {
size := len(digits)
sum := 0
for i, digit := range digits {
value, err := strconv.Atoi(digit)
if err != nil {
return false
}
if size%2 == 0 && i%2 == 0 || size%2 == 1 && i%2 == 1 {
v := value * 2
if v >= 10 {
sum += 1 + (v % 10)
} else {
sum += v
}
} else {
sum += value
}
}
return (sum % 10) == 0
}
// isMongoDB is the validation function for validating if the current field's value is a valid MongoDB ObjectID
func isMongoDB(fl FieldLevel) bool {
val := fl.Field().String()
return mongodbRegex.MatchString(val)
}
// isCreditCard is the validation function for validating if the current field's value is a valid credit card number
func isCreditCard(fl FieldLevel) bool {
val := fl.Field().String()
@ -2505,22 +2737,33 @@ func isCreditCard(fl FieldLevel) bool {
return false
}
sum := 0
for i, digit := range ccDigits {
value, err := strconv.Atoi(digit)
if err != nil {
return false
}
if size%2 == 0 && i%2 == 0 || size%2 == 1 && i%2 == 1 {
v := value * 2
if v >= 10 {
sum += 1 + (v % 10)
} else {
sum += v
}
} else {
sum += value
}
}
return (sum % 10) == 0
return digitsHaveLuhnChecksum(ccDigits)
}
// hasLuhnChecksum is the validation function for validating if the current field's value has a valid Luhn checksum
func hasLuhnChecksum(fl FieldLevel) bool {
field := fl.Field()
var str string // convert to a string which will then be split into single digits; easier and more readable than shifting/extracting single digits from a number
switch field.Kind() {
case reflect.String:
str = field.String()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
str = strconv.FormatInt(field.Int(), 10)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
str = strconv.FormatUint(field.Uint(), 10)
default:
panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}
size := len(str)
if size < 2 { // there has to be at least one digit that carries a meaning + the checksum
return false
}
digits := strings.Split(str, "")
return digitsHaveLuhnChecksum(digits)
}
// isCron is the validation function for validating if the current field's value is a valid cron expression
func isCron(fl FieldLevel) bool {
cronString := fl.Field().String()
return cronRegex.MatchString(cronString)
}
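
A few of the validators introduced in this file can be tried via Var; a sketch with illustrative inputs:

package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

func main() {
	v := validator.New()

	fmt.Println(v.Var("https://example.com", "http_url") == nil)     // true
	fmt.Println(v.Var("ftp://example.com", "http_url") != nil)       // true: non-HTTP scheme
	fmt.Println(v.Var("79927398713", "luhn_checksum") == nil)        // true: valid Luhn number
	fmt.Println(v.Var("507f1f77bcf86cd799439011", "mongodb") == nil) // true: 24 hex chars
	fmt.Println(v.Var("0 0 * * *", "cron") == nil)                   // true
	fmt.Println(v.Var("Hello", "eq_ignore_case=hello") == nil)       // true
}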

View File

@ -120,7 +120,7 @@ func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStr
var fld reflect.StructField
var tag string
var customName string
for i := 0; i < numFields; i++ {
fld = typ.Field(i)

View File

@ -51,7 +51,7 @@ var iso3166_1_alpha2 = map[string]bool{
"TV": true, "UG": true, "UA": true, "AE": true, "GB": true,
"US": true, "UM": true, "UY": true, "UZ": true, "VU": true,
"VE": true, "VN": true, "VG": true, "VI": true, "WF": true,
"EH": true, "YE": true, "ZM": true, "ZW": true,
"EH": true, "YE": true, "ZM": true, "ZW": true, "XK": true,
}
var iso3166_1_alpha3 = map[string]bool{
@ -105,7 +105,7 @@ var iso3166_1_alpha3 = map[string]bool{
"UGA": true, "UKR": true, "ARE": true, "GBR": true, "UMI": true,
"USA": true, "URY": true, "UZB": true, "VUT": true, "VEN": true,
"VNM": true, "VGB": true, "VIR": true, "WLF": true, "ESH": true,
"YEM": true, "ZMB": true, "ZWE": true, "ALA": true,
"YEM": true, "ZMB": true, "ZWE": true, "ALA": true, "UNK": true,
}
var iso3166_1_alpha_numeric = map[int]bool{
// see: https://www.iso.org/iso-3166-country-codes.html
@ -158,7 +158,7 @@ var iso3166_1_alpha_numeric = map[int]bool{
800: true, 804: true, 784: true, 826: true, 581: true,
840: true, 858: true, 860: true, 548: true, 862: true,
704: true, 92: true, 850: true, 876: true, 732: true,
887: true, 894: true, 716: true, 248: true,
887: true, 894: true, 716: true, 248: true, 153: true,
}
var iso3166_2 = map[string]bool{

File diff suppressed because it is too large

View File

@ -44,12 +44,9 @@ func (ve ValidationErrors) Error() string {
buff := bytes.NewBufferString("")
var fe *fieldError
for i := 0; i < len(ve); i++ {
fe = ve[i].(*fieldError)
buff.WriteString(fe.Error())
buff.WriteString(ve[i].Error())
buff.WriteString("\n")
}

View File

@ -19,6 +19,7 @@ const (
e164RegexString = "^\\+[1-9]?[0-9]{7,14}$"
base64RegexString = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
base64URLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2}==|[A-Za-z0-9-_]{3}=|[A-Za-z0-9-_]{4})$"
base64RawURLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2,4})$"
iSBN10RegexString = "^(?:[0-9]{9}X|[0-9]{10})$"
iSBN13RegexString = "^(?:(?:97(?:8|9))[0-9]{10})$"
uUID3RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
@ -64,6 +65,9 @@ const (
bicRegexString = `^[A-Za-z]{6}[A-Za-z0-9]{2}([A-Za-z0-9]{3})?$`
semverRegexString = `^(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$` // numbered capture groups https://semver.org/
dnsRegexStringRFC1035Label = "^[a-z]([-a-z0-9]*[a-z0-9]){0,62}$"
cveRegexString = `^CVE-(1999|2\d{3})-(0[^0]\d{2}|0\d[^0]\d{1}|0\d{2}[^0]|[1-9]{1}\d{3,})$` // CVE Format Id https://cve.mitre.org/cve/identifiers/syntaxchange.html
mongodbRegexString = "^[a-f\\d]{24}$"
cronRegexString = `(@(annually|yearly|monthly|weekly|daily|hourly|reboot))|(@every (\d+(ns|us|µs|ms|s|m|h))+)|((((\d+,)+\d+|(\d+(\/|-)\d+)|\d+|\*) ?){5,7})`
)
var (
@ -83,6 +87,7 @@ var (
emailRegex = regexp.MustCompile(emailRegexString)
base64Regex = regexp.MustCompile(base64RegexString)
base64URLRegex = regexp.MustCompile(base64URLRegexString)
base64RawURLRegex = regexp.MustCompile(base64RawURLRegexString)
iSBN10Regex = regexp.MustCompile(iSBN10RegexString)
iSBN13Regex = regexp.MustCompile(iSBN13RegexString)
uUID3Regex = regexp.MustCompile(uUID3RegexString)
@ -118,8 +123,6 @@ var (
btcUpperAddressRegexBech32 = regexp.MustCompile(btcAddressUpperRegexStringBech32)
btcLowerAddressRegexBech32 = regexp.MustCompile(btcAddressLowerRegexStringBech32)
ethAddressRegex = regexp.MustCompile(ethAddressRegexString)
ethAddressRegexUpper = regexp.MustCompile(ethAddressUpperRegexString)
ethAddressRegexLower = regexp.MustCompile(ethAddressLowerRegexString)
uRLEncodedRegex = regexp.MustCompile(uRLEncodedRegexString)
hTMLEncodedRegex = regexp.MustCompile(hTMLEncodedRegexString)
hTMLRegex = regexp.MustCompile(hTMLRegexString)
@ -128,4 +131,7 @@ var (
bicRegex = regexp.MustCompile(bicRegexString)
semverRegex = regexp.MustCompile(semverRegexString)
dnsRegexRFC1035Label = regexp.MustCompile(dnsRegexStringRFC1035Label)
cveRegex = regexp.MustCompile(cveRegexString)
mongodbRegex = regexp.MustCompile(mongodbRegexString)
cronRegex = regexp.MustCompile(cronRegexString)
)

View File

@ -452,7 +452,6 @@ OUTER:
v.ct = ct
if !ct.fn(ctx, v) {
v.str1 = string(append(ns, cf.altName...))
if v.v.hasTagNameFunc {

View File

@ -190,14 +190,14 @@ func (v *Validate) ValidateMap(data map[string]interface{}, rules map[string]int
//
// eg. to use the names which have been specified for JSON representations of structs, rather than normal Go field names:
//
// validate.RegisterTagNameFunc(func(fld reflect.StructField) string {
// name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0]
// // skip if tag key says it should be ignored
// if name == "-" {
// return ""
// }
// return name
// })
// validate.RegisterTagNameFunc(func(fld reflect.StructField) string {
// name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0]
// // skip if tag key says it should be ignored
// if name == "-" {
// return ""
// }
// return name
// })
func (v *Validate) RegisterTagNameFunc(fn TagNameFunc) {
v.tagNameFunc = fn
v.hasTagNameFunc = true
@ -613,7 +613,7 @@ func (v *Validate) Var(field interface{}, tag string) error {
}
// VarCtx validates a single variable using tag style validation and allows passing of contextual
// validation validation information via context.Context.
// validation information via context.Context.
// eg.
// var i int
// validate.Var(i, "gt=1,lt=10")
@ -632,6 +632,7 @@ func (v *Validate) VarCtx(ctx context.Context, field interface{}, tag string) (e
}
ctag := v.fetchCacheTag(tag)
val := reflect.ValueOf(field)
vd := v.pool.Get().(*validate)
vd.top = val

View File

@ -35,7 +35,7 @@ func (e *ErrBadName) Is(target error) bool {
}
// newErrBadName returns an ErrBadName which returns the given formatted string from Error().
func newErrBadName(fmtStr string, args ...interface{}) *ErrBadName {
func newErrBadName(fmtStr string, args ...any) *ErrBadName {
return &ErrBadName{fmt.Sprintf(fmtStr, args...)}
}

View File

@ -56,16 +56,16 @@ type stringConst string
// To discourage its use in scenarios where the value is not known at code
// authoring time, it must be passed a string constant:
//
// const str = "valid/string"
// MustParseReference(str)
// MustParseReference("another/valid/string")
// MustParseReference(str + "/and/more")
// const str = "valid/string"
// MustParseReference(str)
// MustParseReference("another/valid/string")
// MustParseReference(str + "/and/more")
//
// These will not compile:
//
// var str = "valid/string"
// MustParseReference(str)
// MustParseReference(strings.Join([]string{"valid", "string"}, "/"))
// var str = "valid/string"
// MustParseReference(str)
// MustParseReference(strings.Join([]string{"valid", "string"}, "/"))
func MustParseReference(s stringConst, opts ...Option) Reference {
ref, err := ParseReference(string(s), opts...)
if err != nil {

View File

@ -22,3 +22,4 @@
/trillian_log_signer
/trillian_map_server
default.etcd
cockroach-data/

View File

@ -18,7 +18,6 @@ linters-settings:
linters:
disable-all: true
enable:
- deadcode
- depguard
- gocyclo
- gofmt
@ -28,7 +27,7 @@ linters:
- megacheck
- misspell
- revive
- varcheck
- unused
# TODO(gbelvin): write license linter and commit to upstream.
# ./scripts/check_license.sh is run by ./scripts/presubmit.sh

View File

@ -2,6 +2,76 @@
## HEAD
## v1.5.1
### Storage
* A new storage driver for CockroachDB has been added. It's currently in alpha stage
with support provided by Equinix Metal.
### Misc
* Fix log server not exiting properly on SIGINT
### Dependency updates
* Switch from glog to klog by @jdolitsky in https://github.com/google/trillian/pull/2787
* Bump google.golang.org/api from 0.92.0 to 0.93.0 by @dependabot in https://github.com/google/trillian/pull/2800
* Bump cloud.google.com/go/spanner from 1.36.0 to 1.37.0 by @dependabot in https://github.com/google/trillian/pull/2803
* Bump google.golang.org/grpc from 1.48.0 to 1.49.0 by @dependabot in https://github.com/google/trillian/pull/2804
* Bump google.golang.org/api from 0.93.0 to 0.94.0 by @dependabot in https://github.com/google/trillian/pull/2802
* Bump cloud.google.com/go/spanner from 1.37.0 to 1.38.0 by @dependabot in https://github.com/google/trillian/pull/2806
* Bump k8s.io/klog/v2 from 2.70.1 to 2.80.0 by @dependabot in https://github.com/google/trillian/pull/2807
* Bump k8s.io/klog/v2 from 2.80.0 to 2.80.1 by @dependabot in https://github.com/google/trillian/pull/2808
* Bump github.com/google/go-cmp from 0.5.8 to 0.5.9 by @dependabot in https://github.com/google/trillian/pull/2809
* Bump google.golang.org/api from 0.94.0 to 0.95.0 by @dependabot in https://github.com/google/trillian/pull/2810
* Bump go.etcd.io/etcd/etcdctl/v3 from 3.5.4 to 3.5.5 by @dependabot in https://github.com/google/trillian/pull/2812
* Bump go.etcd.io/etcd/v3 from 3.5.4 to 3.5.5 by @dependabot in https://github.com/google/trillian/pull/2816
* Bump google.golang.org/api from 0.95.0 to 0.96.0 by @dependabot in https://github.com/google/trillian/pull/2813
* Bump google.golang.org/api from 0.96.0 to 0.97.0 by @dependabot in https://github.com/google/trillian/pull/2819
* Bump cloud.google.com/go/spanner from 1.38.0 to 1.39.0 by @dependabot in https://github.com/google/trillian/pull/2818
* Bump google.golang.org/api from 0.97.0 to 0.98.0 by @dependabot in https://github.com/google/trillian/pull/2820
* Bump google.golang.org/grpc from 1.49.0 to 1.50.0 by @dependabot in https://github.com/google/trillian/pull/2821
* Bump google.golang.org/grpc from 1.50.0 to 1.50.1 by @dependabot in https://github.com/google/trillian/pull/2823
* Bump google.golang.org/api from 0.98.0 to 0.99.0 by @dependabot in https://github.com/google/trillian/pull/2822
* Bump google.golang.org/api from 0.99.0 to 0.100.0 by @dependabot in https://github.com/google/trillian/pull/2824
* Bump github.com/prometheus/client_model from 0.2.0 to 0.3.0 by @dependabot in https://github.com/google/trillian/pull/2825
* Bump golang.org/x/tools from 0.1.12 to 0.2.0 by @dependabot in https://github.com/google/trillian/pull/2826
* Bump google.golang.org/api from 0.100.0 to 0.101.0 by @dependabot in https://github.com/google/trillian/pull/2827
* Bump github.com/prometheus/client_golang from 1.13.0 to 1.13.1 by @dependabot in https://github.com/google/trillian/pull/2828
* Bump golang.org/x/sys from 0.1.0 to 0.2.0 by @dependabot in https://github.com/google/trillian/pull/2829
* Bump google.golang.org/api from 0.101.0 to 0.102.0 by @dependabot in https://github.com/google/trillian/pull/2830
* Bump go.opencensus.io from 0.23.0 to 0.24.0 by @dependabot in https://github.com/google/trillian/pull/2832
* Bump cloud.google.com/go/spanner from 1.39.0 to 1.40.0 by @dependabot in https://github.com/google/trillian/pull/2831
* Bump github.com/prometheus/client_golang from 1.13.1 to 1.14.0 by @dependabot in https://github.com/google/trillian/pull/2838
* Bump google.golang.org/api from 0.102.0 to 0.103.0 by @dependabot in https://github.com/google/trillian/pull/2839
* Bump golang.org/x/crypto from 0.1.0 to 0.2.0 by @dependabot in https://github.com/google/trillian/pull/2841
* Bump golang.org/x/tools from 0.2.0 to 0.3.0 by @dependabot in https://github.com/google/trillian/pull/2840
* Dependabot: Also keep GitHub actions up-to-date by @JAORMX in https://github.com/google/trillian/pull/2842
* Bump actions/upload-artifact from 3.1.0 to 3.1.1 by @dependabot in https://github.com/google/trillian/pull/2843
* Bump golang.org/x/crypto from 0.2.0 to 0.3.0 by @dependabot in https://github.com/google/trillian/pull/2847
* Bump google.golang.org/grpc from 1.50.1 to 1.51.0 by @dependabot in https://github.com/google/trillian/pull/2845
* Bump github.com/cockroachdb/cockroach-go/v2 from 2.2.16 to 2.2.18 by @dependabot in https://github.com/google/trillian/pull/2846
* Bump go.etcd.io/etcd/v3 from 3.5.5 to 3.5.6 by @dependabot in https://github.com/google/trillian/pull/2849
* Bump github.com/cockroachdb/cockroach-go/v2 from 2.2.18 to 2.2.19 by @dependabot in https://github.com/google/trillian/pull/2856
* Bump golang.org/x/sys from 0.2.0 to 0.3.0 by @dependabot in https://github.com/google/trillian/pull/2858
* Bump cloud.google.com/go/spanner from 1.40.0 to 1.41.0 by @dependabot in https://github.com/google/trillian/pull/2857
* Bump actions/setup-go from 3.3.1 to 3.4.0 by @dependabot in https://github.com/google/trillian/pull/2862
* Bump github/codeql-action from 2.1.34 to 2.1.35 by @dependabot in https://github.com/google/trillian/pull/2861
* Bump golangci/golangci-lint-action from 3.3.0 to 3.3.1 by @dependabot in https://github.com/google/trillian/pull/2860
* Bump github.com/go-sql-driver/mysql from 1.6.0 to 1.7.0 by @dependabot in https://github.com/google/trillian/pull/2859
* Bump qs, body-parser and express in /scripts/gcb2slack by @dependabot in https://github.com/google/trillian/pull/2867
* Bump minimist from 1.2.0 to 1.2.7 in /scripts/gcb2slack by @dependabot in https://github.com/google/trillian/pull/2864
* Bump axios and @slack/webhook in /scripts/gcb2slack by @dependabot in https://github.com/google/trillian/pull/2868
* Bump json-bigint and google-auth-library in /scripts/gcb2slack by @dependabot in https://github.com/google/trillian/pull/2869
* Bump node-fetch from 2.6.0 to 2.6.7 in /scripts/gcb2slack by @dependabot in https://github.com/google/trillian/pull/2866
* Bump golang.org/x/tools from 0.3.0 to 0.4.0 by @dependabot in https://github.com/google/trillian/pull/2870
* Bump github/codeql-action from 2.1.35 to 2.1.36 by @dependabot in https://github.com/google/trillian/pull/2874
* Bump actions/checkout from 3.1.0 to 3.2.0 by @dependabot in https://github.com/google/trillian/pull/2873
* Bump golang.org/x/crypto from 0.3.0 to 0.4.0 by @dependabot in https://github.com/google/trillian/pull/2872
* Bump google.golang.org/api from 0.103.0 to 0.104.0 by @dependabot in https://github.com/google/trillian/pull/2871
* Bump cloud.google.com/go/spanner from 1.41.0 to 1.42.0 by @dependabot in https://github.com/google/trillian/pull/2877
## v1.5.0
### Storage


@ -59,10 +59,14 @@ Other examples of Trillian personalities are available in the
## Using the Code
**WARNING**: The Trillian codebase is still under development, but the Log mode
is now being used in production by several organizations. We will try to avoid
any further incompatible code and schema changes but cannot guarantee that they
will never be necessary.
The Trillian codebase is stable and is used in production by multiple
organizations, including many large-scale
[Certificate Transparency](https://certificate.transparency.dev) log
operators.
Given this, we do not plan to add any new features to this version of Trillian,
and will try to avoid any further incompatible code and schema changes but
cannot guarantee that they will never be necessary.
The current state of feature implementation is recorded in the
[Feature implementation matrix](docs/Feature_Implementation_Matrix.md).


@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package trillian contains the generated protobuf code for the Trillian API.
package trillian
//go:generate protoc -I=. -I=third_party/googleapis --go_out=paths=source_relative:. --go-grpc_out=paths=source_relative:. --go-grpc_opt=require_unimplemented_servers=false trillian_log_api.proto trillian_admin_api.proto trillian.proto --doc_out=markdown,api.md:./docs/


@ -446,6 +446,12 @@ func (x *Tree) GetDeleteTime() *timestamppb.Timestamp {
}
// SignedLogRoot represents a commitment by a Log to a particular tree.
//
// Note that the signature itself is no-longer provided by Trillian since
// https://github.com/google/trillian/pull/2452 .
// This functionality was intended to support a niche-use case but added
// significant complexity and was prone to causing confusion and
// misunderstanding for personality authors.
type SignedLogRoot struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache


@ -174,6 +174,12 @@ message Tree {
}
// SignedLogRoot represents a commitment by a Log to a particular tree.
//
// Note that the signature itself is no-longer provided by Trillian since
// https://github.com/google/trillian/pull/2452 .
// This functionality was intended to support a niche-use case but added
// significant complexity and was prone to causing confusion and
// misunderstanding for personality authors.
message SignedLogRoot {
// log_root holds the TLS-serialization of the following structure (described
// in RFC5246 notation):
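Since the signature is gone, a personality obtains the root bytes over a channel it already trusts and parses them directly. A minimal sketch, assuming the published github.com/google/trillian and github.com/google/trillian/types packages; the parseRoot helper and the empty placeholder value are illustrative only:

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/trillian"
	"github.com/google/trillian/types"
)

// parseRoot decodes the TLS-serialized (RFC 5246) log_root bytes carried
// by a SignedLogRoot into a structured LogRootV1.
func parseRoot(slr *trillian.SignedLogRoot) (*types.LogRootV1, error) {
	var root types.LogRootV1
	if err := root.UnmarshalBinary(slr.LogRoot); err != nil {
		return nil, err
	}
	return &root, nil
}

func main() {
	// In real use, slr comes from GetLatestSignedLogRoot; an empty value
	// is used here only so the sketch compiles and runs.
	slr := &trillian.SignedLogRoot{}
	root, err := parseRoot(slr)
	if err != nil {
		log.Fatal(err) // empty bytes fail to parse, as expected
	}
	fmt.Println(root.TreeSize, root.RootHash)
}
```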


@ -82,7 +82,7 @@ service TrillianLog {
rpc GetConsistencyProof(GetConsistencyProofRequest)
returns (GetConsistencyProofResponse) {}
// GetLatestSignedLogRoot returns the latest signed log root for a given tree,
// GetLatestSignedLogRoot returns the latest log root for a given tree,
// and optionally also includes a consistency proof from an earlier tree size
// to the new size of the tree.
//


@ -43,7 +43,7 @@ type TrillianLogClient interface {
// If the requested tree size is larger than the server is aware of,
// the response will include the latest known log root and an empty proof.
GetConsistencyProof(ctx context.Context, in *GetConsistencyProofRequest, opts ...grpc.CallOption) (*GetConsistencyProofResponse, error)
// GetLatestSignedLogRoot returns the latest signed log root for a given tree,
// GetLatestSignedLogRoot returns the latest log root for a given tree,
// and optionally also includes a consistency proof from an earlier tree size
// to the new size of the tree.
//
@ -182,7 +182,7 @@ type TrillianLogServer interface {
// If the requested tree size is larger than the server is aware of,
// the response will include the latest known log root and an empty proof.
GetConsistencyProof(context.Context, *GetConsistencyProofRequest) (*GetConsistencyProofResponse, error)
// GetLatestSignedLogRoot returns the latest signed log root for a given tree,
// GetLatestSignedLogRoot returns the latest log root for a given tree,
// and optionally also includes a consistency proof from an earlier tree size
// to the new size of the tree.
//
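A hedged sketch of invoking this RPC through the generated client; the address, tree ID, and tree size below are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/trillian"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("localhost:8090", // placeholder log server address
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := trillian.NewTrillianLogClient(conn)
	// Setting FirstTreeSize > 0 also requests a consistency proof from that
	// earlier size to the current size of the tree.
	resp, err := client.GetLatestSignedLogRoot(context.Background(),
		&trillian.GetLatestSignedLogRootRequest{
			LogId:         4242, // placeholder tree ID
			FirstTreeSize: 100,  // placeholder earlier tree size
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(resp.SignedLogRoot.LogRoot), resp.Proof != nil)
}
```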

vendor/github.com/imdario/mergo/CONTRIBUTING.md (generated, vendored, new file, 112 lines)

@ -0,0 +1,112 @@
<!-- omit in toc -->
# Contributing to mergo
First off, thanks for taking the time to contribute! ❤️
All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
> - Star the project
> - Tweet about it
> - Refer this project in your project's readme
> - Mention the project at local meetups and tell your friends/colleagues
<!-- omit in toc -->
## Table of Contents
- [Code of Conduct](#code-of-conduct)
- [I Have a Question](#i-have-a-question)
- [I Want To Contribute](#i-want-to-contribute)
- [Reporting Bugs](#reporting-bugs)
- [Suggesting Enhancements](#suggesting-enhancements)
## Code of Conduct
This project and everyone participating in it is governed by the
[mergo Code of Conduct](https://github.com/imdario/mergo/blob/master/CODE_OF_CONDUCT.md).
By participating, you are expected to uphold this code. Please report unacceptable behavior
to <>.
## I Have a Question
> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo).
Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
If you then still feel the need to ask a question and need clarification, we recommend the following:
- Open an [Issue](https://github.com/imdario/mergo/issues/new).
- Provide as much context as you can about what you're running into.
- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant.
We will then take care of the issue as soon as possible.
## I Want To Contribute
> ### Legal Notice <!-- omit in toc -->
> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
### Reporting Bugs
<!-- omit in toc -->
#### Before Submitting a Bug Report
A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
- Make sure that you are using the latest version.
- Determine if your bug is really a bug and not an error on your side, e.g. using incompatible environment components/versions (make sure that you have read the [documentation](); if you are looking for support, you might want to check [this section](#i-have-a-question)).
- To see if other users have experienced (and potentially already solved) the same issue you are having, check whether a bug report already exists for your bug or error in the [bug tracker](https://github.com/imdario/mergo/issues?q=label%3Abug).
- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
- Collect information about the bug:
- Stack trace (Traceback)
- OS, Platform and Version (Windows, Linux, macOS, x86, ARM)
- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
- Possibly your input and the output
- Can you reliably reproduce the issue? And can you also reproduce it with older versions?
<!-- omit in toc -->
#### How Do I Submit a Good Bug Report?
> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead sensitive bugs must be sent by email to .
<!-- You may add a PGP key to allow the messages to be sent encrypted as well. -->
We use GitHub issues to track bugs and errors. If you run into an issue with the project:
- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.)
- Explain the behavior you would expect and the actual behavior.
- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
- Provide the information you collected in the previous section.
Once it's filed:
- The project team will label the issue accordingly.
- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced.
- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone.
### Suggesting Enhancements
This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
<!-- omit in toc -->
#### Before Submitting an Enhancement
- Make sure that you are using the latest version.
- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration.
- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.
<!-- omit in toc -->
#### How Do I Submit a Good Enhancement Suggestion?
Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues).
- Use a **clear and descriptive title** for the issue to identify the suggestion.
- Provide a **step-by-step description of the suggested enhancement** in as many details as possible.
- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. <!-- this should only be included if the project has a GUI -->
- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration.
<!-- omit in toc -->
## Attribution
This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)!


@ -1,6 +1,5 @@
# Mergo
[![GoDoc][3]][4]
[![GitHub release][5]][6]
[![GoCard][7]][8]
@ -9,6 +8,7 @@
[![Sourcegraph][11]][12]
[![FOSSA Status][13]][14]
[![Become my sponsor][15]][16]
[![Tidelift][17]][18]
[1]: https://travis-ci.org/imdario/mergo.png
[2]: https://travis-ci.org/imdario/mergo
@ -26,6 +26,8 @@
[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
[15]: https://img.shields.io/github/sponsors/imdario
[16]: https://github.com/sponsors/imdario
[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo
[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
@ -55,7 +57,6 @@ If Mergo is useful to you, consider buying me a coffee, a beer, or making a mont
### Mergo in the wild
- [cli/cli](https://github.com/cli/cli)
- [moby/moby](https://github.com/moby/moby)
- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
- [vmware/dispatch](https://github.com/vmware/dispatch)
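The one-line description above maps to usage like this minimal sketch (the Config type and values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Config struct {
	Host string
	Port int
}

func main() {
	cfg := Config{Host: "example.com"}                // user set only Host
	defaults := Config{Host: "localhost", Port: 8080} // fallback values

	// Merge fills only the empty fields of cfg from defaults.
	if err := mergo.Merge(&cfg, defaults); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v\n", cfg) // {Host:example.com Port:8080}
}
```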

vendor/github.com/imdario/mergo/SECURITY.md (generated, vendored, new file, 14 lines)

@ -0,0 +1,14 @@
# Security Policy
## Supported Versions
| Version | Supported |
| ------- | ------------------ |
| 0.3.x | :white_check_mark: |
| < 0.3 | :x: |
## Security contact information
To report a security vulnerability, please use the
[Tidelift security contact](https://tidelift.com/security).
Tidelift will coordinate the fix and disclosure.


@ -44,7 +44,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
}
}
// Remember, remember...
visited[h] = &visit{addr, typ, seen}
visited[h] = &visit{typ, seen, addr}
}
zeroValue := reflect.Value{}
switch dst.Kind() {
@ -58,7 +58,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
}
fieldName := field.Name
fieldName = changeInitialCase(fieldName, unicode.ToLower)
if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) {
dstMap[fieldName] = src.Field(i).Interface()
}
}
@ -142,7 +142,7 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
func _map(dst, src interface{}, opts ...func(*Config)) error {
if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
return ErrNonPointerAgument
return ErrNonPointerArgument
}
var (
vDst, vSrc reflect.Value


@ -38,10 +38,11 @@ func isExportedComponent(field *reflect.StructField) bool {
}
type Config struct {
Transformers Transformers
Overwrite bool
ShouldNotDereference bool
AppendSlice bool
TypeCheck bool
Transformers Transformers
overwriteWithEmptyValue bool
overwriteSliceWithEmptyValue bool
sliceDeepCopy bool
@ -76,7 +77,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
}
}
// Remember, remember...
visited[h] = &visit{addr, typ, seen}
visited[h] = &visit{typ, seen, addr}
}
if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
@ -95,7 +96,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
}
}
} else {
if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) {
if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) {
dst.Set(src)
}
}
@ -110,7 +111,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
}
if src.Kind() != reflect.Map {
if overwrite {
if overwrite && dst.CanSet() {
dst.Set(src)
}
return
@ -162,7 +163,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
dstSlice = reflect.ValueOf(dstElement.Interface())
}
if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
if typeCheck && srcSlice.Type() != dstSlice.Type() {
return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
}
@ -194,22 +195,38 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
dst.SetMapIndex(key, dstSlice)
}
}
if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) {
continue
if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) {
if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice {
continue
}
if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map {
continue
}
}
if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) {
if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) {
if dst.IsNil() {
dst.Set(reflect.MakeMap(dst.Type()))
}
dst.SetMapIndex(key, srcElement)
}
}
// Ensure that all keys in dst are deleted if they are not in src.
if overwriteWithEmptySrc {
for _, key := range dst.MapKeys() {
srcElement := src.MapIndex(key)
if !srcElement.IsValid() {
dst.SetMapIndex(key, reflect.Value{})
}
}
}
case reflect.Slice:
if !dst.CanSet() {
break
}
if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy {
if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
dst.Set(src)
} else if config.AppendSlice {
if src.Type() != dst.Type() {
@ -244,12 +261,18 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
if src.Kind() != reflect.Interface {
if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
dst.Set(src)
}
} else if src.Kind() == reflect.Ptr {
if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
return
if !config.ShouldNotDereference {
if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
return
}
} else {
if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
dst.Set(src)
}
}
} else if dst.Elem().Type() == src.Type() {
if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
@ -262,7 +285,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
}
if dst.IsNil() || overwrite {
if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
dst.Set(src)
}
break
@ -275,7 +298,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
break
}
default:
mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc)
mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc)
if mustSet {
if dst.CanSet() {
dst.Set(src)
@ -326,6 +349,12 @@ func WithOverrideEmptySlice(config *Config) {
config.overwriteSliceWithEmptyValue = true
}
// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty
// (i.e. a non-nil pointer is never considered empty).
func WithoutDereference(config *Config) {
config.ShouldNotDereference = true
}
// WithAppendSlice will make merge append slices instead of overwriting it.
func WithAppendSlice(config *Config) {
config.AppendSlice = true
@ -344,7 +373,7 @@ func WithSliceDeepCopy(config *Config) {
func merge(dst, src interface{}, opts ...func(*Config)) error {
if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
return ErrNonPointerAgument
return ErrNonPointerArgument
}
var (
vDst, vSrc reflect.Value
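The WithoutDereference option introduced in this hunk changes when a pointer field counts as empty; a hedged sketch of the behavioural difference (the Opts type and values are illustrative):

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type Opts struct {
	Retries *int
}

func main() {
	zero, five := 0, 5
	src := Opts{Retries: &five}

	// Default: the pointer is dereferenced, so *dst.Retries == 0 counts as
	// empty and the merge overwrites it with src's value.
	dst := Opts{Retries: &zero}
	_ = mergo.Merge(&dst, src)
	fmt.Println(*dst.Retries) // 5

	// WithoutDereference: a non-nil pointer is never considered empty,
	// so the existing value is kept.
	z := 0
	dst = Opts{Retries: &z}
	_ = mergo.Merge(&dst, src, mergo.WithoutDereference)
	fmt.Println(*dst.Retries) // 0
}
```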


@ -20,7 +20,7 @@ var (
ErrNotSupported = errors.New("only structs, maps, and slices are supported")
ErrExpectedMapAsDestination = errors.New("dst was expected to be a map")
ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
ErrNonPointerAgument = errors.New("dst must be a pointer")
ErrNonPointerArgument = errors.New("dst must be a pointer")
)
// During deepMerge, must keep track of checks that are
@ -28,13 +28,13 @@ var (
// checks in progress are true when it reencounters them.
// Visited are stored in a map indexed by 17 * a1 + a2;
type visit struct {
ptr uintptr
typ reflect.Type
next *visit
ptr uintptr
}
// From src/pkg/encoding/json/encode.go.
func isEmptyValue(v reflect.Value) bool {
func isEmptyValue(v reflect.Value, shouldDereference bool) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
@ -50,7 +50,10 @@ func isEmptyValue(v reflect.Value) bool {
if v.IsNil() {
return true
}
return isEmptyValue(v.Elem())
if shouldDereference {
return isEmptyValue(v.Elem(), shouldDereference)
}
return false
case reflect.Func:
return v.IsNil()
case reflect.Invalid:


@ -3,7 +3,7 @@
before:
hooks:
- ./gen.sh
- go install mvdan.cc/garble@v0.7.2
- go install mvdan.cc/garble@v0.9.3
builds:
-


@ -16,6 +16,27 @@ This package provides various compression algorithms.
# changelog
* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
* gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767
* s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766
* zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773
* huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774
* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0)
* s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685
* s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752
* s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755
* s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
* s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
* Jan 21st, 2023 (v1.15.15)
* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
* zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
* gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
* Jan 3rd, 2023 (v1.15.14)
* flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718


@ -260,7 +260,9 @@ func (s *Scratch) buildDtable() error {
// If the buffer is over-read an error is returned.
func (s *Scratch) decompress() error {
br := &s.bits
br.init(s.br.unread())
if err := br.init(s.br.unread()); err != nil {
return err
}
var s1, s2 decoder
// Initialize and decode first state and symbol.


@ -60,6 +60,22 @@ func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) {
b.nBits += encA.nBits + encB.nBits
}
// encFourSymbols adds up to 32 bits from four symbols.
// It will not check if there is space for them,
// so the caller must ensure that b has been flushed recently.
func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) {
bitsA := encA.nBits
bitsB := bitsA + encB.nBits
bitsC := bitsB + encC.nBits
bitsD := bitsC + encD.nBits
combined := uint64(encA.val) |
(uint64(encB.val) << (bitsA & 63)) |
(uint64(encC.val) << (bitsB & 63)) |
(uint64(encD.val) << (bitsC & 63))
b.bitContainer |= combined << (b.nBits & 63)
b.nBits += bitsD
}
// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
if b.nBits < 32 {


@ -248,8 +248,7 @@ func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
tmp := src[n : n+4]
// tmp should be len 4
bw.flush32()
bw.encTwoSymbols(cTable, tmp[3], tmp[2])
bw.encTwoSymbols(cTable, tmp[1], tmp[0])
bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]])
}
} else {
for ; n >= 0; n -= 4 {
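The rewrite above folds two dependent encTwoSymbols calls into a single encFourSymbols call that pre-combines all four codes before touching the bit container. A toy illustration of the packing arithmetic, mirroring encFourSymbols rather than calling the library's internal API:

```go
package main

import "fmt"

type cTableEntry struct {
	val   uint16 // the code bits
	nBits uint8  // number of valid bits in val
}

func main() {
	a := cTableEntry{val: 0b101, nBits: 3}
	b := cTableEntry{val: 0b11, nBits: 2}
	c := cTableEntry{val: 0b0110, nBits: 4}
	d := cTableEntry{val: 0b1, nBits: 1}

	// Accumulate bit offsets, then OR all four codes into one 64-bit word
	// in a single expression, as encFourSymbols does.
	bitsA := a.nBits
	bitsB := bitsA + b.nBits
	bitsC := bitsB + c.nBits
	combined := uint64(a.val) |
		uint64(b.val)<<(bitsA&63) |
		uint64(c.val)<<(bitsB&63) |
		uint64(d.val)<<(bitsC&63)
	fmt.Printf("%010b (%d bits)\n", combined, bitsC+d.nBits)
}
```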

Some files were not shown because too many files have changed in this diff.