mirror of https://github.com/containers/skopeo.git
synced 2025-04-27 11:01:18 +00:00

fix(deps): update github.com/containers/image/v5 digest to faa4f4f

Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>

This commit is contained in:
parent 39308abb37
commit 4d80bf8c7d

go.mod (46 lines changed)
@@ -5,9 +5,9 @@ go 1.19
 require (
 	github.com/Masterminds/semver/v3 v3.2.1
 	github.com/containers/common v0.57.4
-	github.com/containers/image/v5 v5.29.3-0.20240207231441-93b4b55d865b
+	github.com/containers/image/v5 v5.29.3-0.20240301163503-faa4f4fd0e4f
 	github.com/containers/ocicrypt v1.1.9
-	github.com/containers/storage v1.52.0
+	github.com/containers/storage v1.52.1-0.20240301111729-226cffb1c4d2
 	github.com/docker/distribution v2.8.3+incompatible
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.1.0
@@ -15,9 +15,9 @@ require (
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.8.0
 	github.com/spf13/pflag v1.0.5
-	github.com/stretchr/testify v1.8.4
+	github.com/stretchr/testify v1.9.0
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
-	golang.org/x/exp v0.0.0-20240119083558-1b970713d09a
+	golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
 	golang.org/x/term v0.17.0
 	gopkg.in/yaml.v3 v3.0.1
 )
@@ -26,12 +26,12 @@ require (
 	dario.cat/mergo v1.0.0 // indirect
 	github.com/BurntSushi/toml v1.3.2 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
-	github.com/Microsoft/hcsshim v0.12.0-rc.2 // indirect
+	github.com/Microsoft/hcsshim v0.12.0-rc.3 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/containerd/cgroups/v3 v3.0.2 // indirect
-	github.com/containerd/containerd v1.7.12 // indirect
+	github.com/containerd/errdefs v0.1.0 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
 	github.com/coreos/go-oidc/v3 v3.9.0 // indirect
@@ -45,7 +45,7 @@ require (
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
-	github.com/go-jose/go-jose/v3 v3.0.1 // indirect
+	github.com/go-jose/go-jose/v3 v3.0.2 // indirect
 	github.com/go-logr/logr v1.3.0 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/analysis v0.21.4 // indirect
@@ -55,15 +55,15 @@ require (
 	github.com/go-openapi/loads v0.21.2 // indirect
 	github.com/go-openapi/runtime v0.26.0 // indirect
 	github.com/go-openapi/spec v0.20.9 // indirect
-	github.com/go-openapi/strfmt v0.22.0 // indirect
+	github.com/go-openapi/strfmt v0.22.1 // indirect
 	github.com/go-openapi/swag v0.22.9 // indirect
 	github.com/go-openapi/validate v0.22.1 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
-	github.com/google/go-containerregistry v0.17.0 // indirect
+	github.com/google/go-containerregistry v0.19.0 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
-	github.com/google/uuid v1.5.0 // indirect
+	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/mux v1.8.1 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
@@ -72,8 +72,9 @@ require (
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.17.6 // indirect
+	github.com/klauspost/compress v1.17.7 // indirect
 	github.com/klauspost/pgzip v1.2.6 // indirect
+	github.com/kr/pretty v0.3.1 // indirect
 	github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/mattn/go-runewidth v0.0.15 // indirect
@@ -83,11 +84,11 @@ require (
 	github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/moby/sys/mountinfo v0.7.1 // indirect
+	github.com/moby/sys/user v0.1.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
-	github.com/opencontainers/runc v1.1.12 // indirect
-	github.com/opencontainers/runtime-spec v1.1.0 // indirect
+	github.com/opencontainers/runtime-spec v1.2.0 // indirect
 	github.com/opencontainers/selinux v1.11.0 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
 	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
@@ -98,9 +99,10 @@ require (
 	github.com/russross/blackfriday v2.0.0+incompatible // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
 	github.com/segmentio/ksuid v1.0.4 // indirect
+	github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect
 	github.com/sigstore/fulcio v1.4.3 // indirect
 	github.com/sigstore/rekor v1.2.2 // indirect
-	github.com/sigstore/sigstore v1.8.1 // indirect
+	github.com/sigstore/sigstore v1.8.2 // indirect
 	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
 	github.com/sylabs/sif/v2 v2.15.1 // indirect
@@ -112,23 +114,25 @@ require (
 	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
-	go.mongodb.org/mongo-driver v1.13.1 // indirect
+	go.mongodb.org/mongo-driver v1.14.0 // indirect
 	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 	go.opencensus.io v0.24.0 // indirect
 	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
 	go.opentelemetry.io/otel v1.19.0 // indirect
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect
 	go.opentelemetry.io/otel/metric v1.19.0 // indirect
 	go.opentelemetry.io/otel/trace v1.19.0 // indirect
-	golang.org/x/crypto v0.19.0 // indirect
-	golang.org/x/mod v0.14.0 // indirect
-	golang.org/x/net v0.20.0 // indirect
-	golang.org/x/oauth2 v0.16.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.0.0 // indirect
+	golang.org/x/crypto v0.20.0 // indirect
+	golang.org/x/mod v0.15.0 // indirect
+	golang.org/x/net v0.21.0 // indirect
+	golang.org/x/oauth2 v0.17.0 // indirect
+	golang.org/x/sync v0.6.0 // indirect
 	golang.org/x/sys v0.17.0 // indirect
 	golang.org/x/text v0.14.0 // indirect
-	golang.org/x/tools v0.17.0 // indirect
+	golang.org/x/tools v0.18.0 // indirect
 	google.golang.org/appengine v1.6.8 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect
 	google.golang.org/grpc v1.59.0 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
go.sum (109 lines changed)

@@ -10,8 +10,8 @@ github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0
 github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
 github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
 github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/Microsoft/hcsshim v0.12.0-rc.2 h1:gfKebjq3Mq17Ys+4cjE8vc2h6tZVeqCGb9a7vBVqpAk=
-github.com/Microsoft/hcsshim v0.12.0-rc.2/go.mod h1:G2TZhBED5frlh/hsuxV5CDh/ylkSFknPAMPpQg9owQw=
+github.com/Microsoft/hcsshim v0.12.0-rc.3 h1:5GNGrobGs/sN/0nFO21W9k4lFn+iXXZAE8fCZbmdRak=
+github.com/Microsoft/hcsshim v0.12.0-rc.3/go.mod h1:WuNfcaYNaw+KpCEsZCIM6HCEmu0c5HfXpi+dDSmveP0=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
@@ -29,21 +29,21 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
 github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
-github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0=
-github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk=
+github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
+github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
 github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
 github.com/containers/common v0.57.4 h1:kmfBad92kUjP5X44BPpOwMe+eZQqaKETfS+ASeL0g+g=
 github.com/containers/common v0.57.4/go.mod h1:o3L3CyOI9yr+JC8l4dZgvqTxcjs3qdKmkek00uchgvw=
-github.com/containers/image/v5 v5.29.3-0.20240207231441-93b4b55d865b h1:SDiNbvEwiBB0TdoIBkfkY39FD1c2jzL3aJIXRsSr0oA=
-github.com/containers/image/v5 v5.29.3-0.20240207231441-93b4b55d865b/go.mod h1:hD2xTPHZ/QaNHhxx92ysHfy8OlrakkfkHQkaz4tD7Gs=
+github.com/containers/image/v5 v5.29.3-0.20240301163503-faa4f4fd0e4f h1:iJOH809/o1+PGUa+WlkOzC17o2YgXZwylsG6W37Rz2A=
+github.com/containers/image/v5 v5.29.3-0.20240301163503-faa4f4fd0e4f/go.mod h1:lXWDxGqOqfwx8XaHyVM2BKv3Y99qTlM7dYAD/P45Mh4=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOjIdEM=
 github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys=
-github.com/containers/storage v1.52.0 h1:8QFFeJg2cQFN0TyJguxHrSz3bl7XtMRnfXrTsvLVkuY=
-github.com/containers/storage v1.52.0/go.mod h1:PE+L330tisEjQrAVkfAlW8ECvqzc/jusrxJzu9TEi2w=
+github.com/containers/storage v1.52.1-0.20240301111729-226cffb1c4d2 h1:9+rUow7EtVgtlcn8oFValad/M548QnSpSCSBctduPbw=
+github.com/containers/storage v1.52.1-0.20240301111729-226cffb1c4d2/go.mod h1:mFA6QpUoT9qTa3q2DD1CvSo3Az3syNkw1P9X+4nUYdY=
 github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo=
 github.com/coreos/go-oidc/v3 v3.9.0/go.mod h1:rTKz2PYwftcrtoCzV5g5kvfJoWcm0Mk8AF8y1iAQro4=
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
@@ -79,8 +79,8 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
 github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
 github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
-github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
+github.com/go-jose/go-jose/v3 v3.0.2 h1:2Edjn8Nrb44UvTdp84KU0bBPs1cO7noRCybtS3eJEUQ=
+github.com/go-jose/go-jose/v3 v3.0.2/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
 github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -114,8 +114,8 @@ github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6
 github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
 github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
 github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
-github.com/go-openapi/strfmt v0.22.0 h1:Ew9PnEYc246TwrEspvBdDHS4BVKXy/AOVsfqGDgAcaI=
-github.com/go-openapi/strfmt v0.22.0/go.mod h1:HzJ9kokGIju3/K6ap8jL+OlGAbjpSv27135Yr9OivU4=
+github.com/go-openapi/strfmt v0.22.1 h1:5Ky8cybT4576C6Ffc+8gYji/wRXCo6Ozm8RaWjPI6jc=
+github.com/go-openapi/strfmt v0.22.1/go.mod h1:OfVoytIXJasDkkGvkb1Cceb3BPyMOwk1FgmyyEw7NYg=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
@@ -124,7 +124,7 @@ github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZC
 github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE=
 github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
 github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
-github.com/go-rod/rod v0.114.5 h1:1x6oqnslwFVuXJbJifgxspJUd3O4ntaGhRLHt+4Er9c=
+github.com/go-rod/rod v0.114.7 h1:h4pimzSOUnw7Eo41zdJA788XsawzHjJMyzCE3BrBww0=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
 github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
@@ -181,9 +181,10 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
-github.com/google/go-containerregistry v0.17.0 h1:5p+zYs/R4VGHkhyvgWurWrpJ2hW4Vv9fQI+GzdcwXLk=
 github.com/google/go-containerregistry v0.17.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
+github.com/google/go-containerregistry v0.19.0 h1:uIsMRBV7m/HDkDxE/nXMnv1q+lOOSPlQ/ywc5JbB8Ic=
+github.com/google/go-containerregistry v0.19.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
 github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
 github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -191,8 +192,8 @@ github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBx
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
-github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
 github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk=
@@ -222,8 +223,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
-github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI=
-github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
+github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
 github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -231,7 +232,8 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -264,6 +266,8 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/moby/sys/mountinfo v0.7.1 h1:/tTvQaSJRr2FshkhXiIpux6fQ2Zvc4j7tAhMTStAG2g=
 github.com/moby/sys/mountinfo v0.7.1/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
+github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
+github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
 github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
@@ -283,10 +287,8 @@ github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQ
 github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
 github.com/opencontainers/image-tools v1.0.0-rc3 h1:ZR837lBIxq6mmwEqfYrbLMuf75eBSHhccVHy6lsBeM4=
 github.com/opencontainers/image-tools v1.0.0-rc3/go.mod h1:A9btVpZLzttF4iFaKNychhPyrhfOjJ1OF5KrA8GcLj4=
-github.com/opencontainers/runc v1.1.12 h1:BOIssBaW1La0/qbNZHXOOa71dZfZEQOzW7dqQf3phss=
-github.com/opencontainers/runc v1.1.12/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8=
-github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
-github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
+github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
 github.com/opencontainers/selinux v1.11.0/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
@@ -295,6 +297,7 @@ github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f h1:/UDgs8FGMqw
 github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc=
+github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
 github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
 github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -314,6 +317,7 @@ github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUc
 github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
 github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
 github.com/russross/blackfriday v2.0.0+incompatible h1:cBXrhZNUf9C+La9/YpS+UHpUT8YD6Td9ZMSU9APFcsk=
 github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
@@ -325,12 +329,13 @@ github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c
 github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
 github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
 github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ=
 github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og=
 github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY=
 github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg=
-github.com/sigstore/sigstore v1.8.1 h1:mAVposMb14oplk2h/bayPmIVdzbq2IhCgy4g6R0ZSjo=
-github.com/sigstore/sigstore v1.8.1/go.mod h1:02SL1158BSj15bZyOFz7m+/nJzLZfFd9A8ab3Kz7w/E=
+github.com/sigstore/sigstore v1.8.2 h1:0Ttjcn3V0fVQXlYq7+oHaaHkGFIt3ywm7SF4JTU/l8c=
+github.com/sigstore/sigstore v1.8.2/go.mod h1:CHVcSyknCcjI4K2ZhS1SI28r0tcQyBlwtALG536x1DY=
 github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -357,8 +362,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
 github.com/sylabs/sif/v2 v2.15.1 h1:75BcunPOY11fVhe02/WHuNLTfDd3OHH0ex0MuuNMYX0=
 github.com/sylabs/sif/v2 v2.15.1/go.mod h1:YiwCUdZOhiohnPbyxuxvCZa+03HwAaiC+vfAKZPR8nQ=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
@@ -378,10 +383,8 @@ github.com/vbauerster/mpb/v8 v8.7.2/go.mod h1:ZFnrjzspgDHoxYLGvxIruiNk73GNTPG4YH
 github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
 github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
-github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
 github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
 github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
-github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
 github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
 github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -401,8 +404,8 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t
 go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
 go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
 go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
-go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk=
-go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo=
+go.mongodb.org/mongo-driver v1.14.0 h1:P98w8egYRjYe3XDjxhYJagTokP/H6HzlsnojRgZRd80=
+go.mongodb.org/mongo-driver v1.14.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
 go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak=
 go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
@@ -412,6 +415,7 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:
 go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
 go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
 go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
 go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
@@ -419,29 +423,31 @@ go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJ
 go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
 go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
+go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
+go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
 go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
 golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo=
-golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
+golang.org/x/crypto v0.20.0 h1:jmAMJJZXr5KiCw05dfYK9QnqaqKLYXijU23lsEdcQqg=
+golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA=
-golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
+golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
+golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
-golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8=
+golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -455,11 +461,13 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
 golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
-golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
-golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
+golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
+golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4=
+golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
-golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
+golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
+golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -469,6 +477,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
 golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -487,10 +496,14 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y=
 golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
+golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
 golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U=
 golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -500,6 +513,7 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
@@ -516,8 +530,9 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
-golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
-golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
+golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
+golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ=
+golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -529,10 +544,10 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
-google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb h1:XFBgcDwm7irdHTbz4Zk2h7Mh+eis4nfJEFQFYzJzuIA=
+google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13 h1:vlzZttNJGVqTsRFU9AmdnrcO1Znh8Ew9kCD//yjigk0=
 google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb h1:lK0oleSc7IQsUxO3U5TjL9DWlsxpEBemh+zpB7IqhWI=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
vendor/github.com/Microsoft/hcsshim/Makefile (generated, vendored; 87 lines changed)

@@ -29,12 +29,23 @@ ifeq "$(DEV_BUILD)" "1"
 DELTA_TARGET=out/delta-dev.tar.gz
 endif

+ifeq "$(SNP_BUILD)" "1"
+DELTA_TARGET=out/delta-snp.tar.gz
+endif
+
 # The link aliases for gcstools
 GCS_TOOLS=\
 	generichook \
 	install-drivers

-.PHONY: all always rootfs test
+# Common path prefix.
+PATH_PREFIX:=
+# These have PATH_PREFIX prepended to obtain the full path in recipies e.g. $(PATH_PREFIX)/$(VMGS_TOOL)
+VMGS_TOOL:=
+IGVM_TOOL:=
+KERNEL_PATH:=

+.PHONY: all always rootfs test snp simple
+
 .DEFAULT_GOAL := all

@@ -49,9 +60,58 @@ test:

 rootfs: out/rootfs.vhd

-out/rootfs.vhd: out/rootfs.tar.gz bin/cmd/tar2ext4
+snp: out/kernelinitrd.vmgs out/rootfs.hash.vhd out/rootfs.vhd out/v2056.vmgs
+
+simple: out/simple.vmgs snp
+
+%.vmgs: %.bin
+	rm -f $@
+	# du -BM returns the size of the bin file in M, eg 7M. The sed command replaces the M with *1024*1024 and then bc does the math to convert to bytes
+	$(PATH_PREFIX)/$(VMGS_TOOL) create --filepath $@ --filesize `du -BM $< | sed "s/M.*/*1024*1024/" | bc`
+	$(PATH_PREFIX)/$(VMGS_TOOL) write --filepath $@ --datapath $< -i=8
+
+# Simplest debug UVM used to test changes to the linux kernel. No dmverity protection. Boots an initramdisk rather than directly booting a vhd disk.
+out/simple.bin: out/initrd.img $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup_simple.sh
+	rm -f $@
+	python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 rdinit=/startup_simple.sh" -rdinit out/initrd.img -vtl 0
+
+ROOTFS_DEVICE:=/dev/sda
+VERITY_DEVICE:=/dev/sdb
+# Debug build for use with uvmtester. UVM with dm-verity protected vhd disk mounted directly via the kernel command line. Ignores corruption in dm-verity protected disk. (Use dmesg to see if dm-verity is ignoring data corruption.)
+out/v2056.bin: out/rootfs.vhd out/rootfs.hash.vhd $(PATH_PREFIX)/$(KERNEL_PATH) out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt boot/startup_v2056.sh
+	rm -f $@
+	python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt) 1 ignore_corruption\" init=/startup_v2056.sh" -vtl 0
+
+# Full UVM with dm-verity protected vhd disk mounted directly via the kernel command line.
+out/kernelinitrd.bin: out/rootfs.vhd out/rootfs.hash.vhd out/rootfs.hash.datasectors out/rootfs.hash.datablocksize out/rootfs.hash.hashblocksize out/rootfs.hash.datablocks out/rootfs.hash.rootdigest out/rootfs.hash.salt $(PATH_PREFIX)/$(KERNEL_PATH) boot/startup.sh
+	rm -f $@
+	python3 $(PATH_PREFIX)/$(IGVM_TOOL) -o $@ -kernel $(PATH_PREFIX)/$(KERNEL_PATH) -append "8250_core.nr_uarts=0 panic=-1 debug loglevel=7 root=/dev/dm-0 dm-mod.create=\"dmverity,,,ro,0 $(shell cat out/rootfs.hash.datasectors) verity 1 $(ROOTFS_DEVICE) $(VERITY_DEVICE) $(shell cat out/rootfs.hash.datablocksize) $(shell cat out/rootfs.hash.hashblocksize) $(shell cat out/rootfs.hash.datablocks) 0 sha256 $(shell cat out/rootfs.hash.rootdigest) $(shell cat out/rootfs.hash.salt)\" init=/startup.sh" -vtl 0
+
+# Rule to make a vhd from a file. This is used to create the rootfs.hash.vhd from rootfs.hash.
+%.vhd: % bin/cmd/tar2ext4
+	./bin/cmd/tar2ext4 -only-vhd -i $< -o $@
+
+# Rule to make a vhd from an ext4 file. This is used to create the rootfs.vhd from rootfs.ext4.
+%.vhd: %.ext4 bin/cmd/tar2ext4
+	./bin/cmd/tar2ext4 -only-vhd -i $< -o $@
+
+%.hash %.hash.info %.hash.datablocks %.hash.rootdigest %hash.datablocksize %.hash.datasectors %.hash.hashblocksize: %.ext4 %.hash.salt
+	veritysetup format --no-superblock --salt $(shell cat out/rootfs.hash.salt) $< $*.hash > $*.hash.info
+	# Retrieve info required by dm-verity at boot time
+	# Get the blocksize of rootfs
+	cat $*.hash.info | awk '/^Root hash:/{ print $$3 }' > $*.hash.rootdigest
+	cat $*.hash.info | awk '/^Salt:/{ print $$2 }' > $*.hash.salt
+	cat $*.hash.info | awk '/^Data block size:/{ print $$4 }' > $*.hash.datablocksize
+	cat $*.hash.info | awk '/^Hash block size:/{ print $$4 }' > $*.hash.hashblocksize
+	cat $*.hash.info | awk '/^Data blocks:/{ print $$3 }' > $*.hash.datablocks
+	echo $$(( $$(cat $*.hash.datablocks) * $$(cat $*.hash.datablocksize) / 512 )) > $*.hash.datasectors
+
+out/rootfs.hash.salt:
+	hexdump -vn32 -e'8/4 "%08X" 1 "\n"' /dev/random > $@
+
+out/rootfs.ext4: out/rootfs.tar.gz bin/cmd/tar2ext4
 	gzip -f -d ./out/rootfs.tar.gz
-	bin/cmd/tar2ext4 -vhd -i ./out/rootfs.tar -o $@
+	./bin/cmd/tar2ext4 -i ./out/rootfs.tar -o $@

 out/rootfs.tar.gz: out/initrd.img
 	rm -rf rootfs-conv
@@ -74,6 +134,20 @@ out/delta-dev.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report
 	tar -zcf $@ -C rootfs-dev .
 	rm -rf rootfs-dev

+out/delta-snp.tar.gz: out/delta.tar.gz bin/internal/tools/snp-report boot/startup_v2056.sh boot/startup_simple.sh boot/startup.sh
+	rm -rf rootfs-snp
+	mkdir rootfs-snp
+	tar -xzf out/delta.tar.gz -C rootfs-snp
+	cp boot/startup_v2056.sh rootfs-snp/startup_v2056.sh
+	cp boot/startup_simple.sh rootfs-snp/startup_simple.sh
+	cp boot/startup.sh rootfs-snp/startup.sh
+	cp bin/internal/tools/snp-report rootfs-snp/bin/
+	chmod a+x rootfs-snp/startup_v2056.sh
+	chmod a+x rootfs-snp/startup_simple.sh
+	chmod a+x rootfs-snp/startup.sh
+	tar -zcf $@ -C rootfs-snp .
+	rm -rf rootfs-snp
+
 out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths Makefile
 	@mkdir -p out
 	rm -rf rootfs
@@ -94,7 +168,10 @@ out/delta.tar.gz: bin/init bin/vsockexec bin/cmd/gcs bin/cmd/gcstools bin/cmd/ho
 	tar -zcf $@ -C rootfs .
 	rm -rf rootfs

-bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report:
+out/containerd-shim-runhcs-v1.exe:
+	GOOS=windows $(GO_BUILD) -o $@ $(SRCROOT)/cmd/containerd-shim-runhcs-v1
+
+bin/cmd/gcs bin/cmd/gcstools bin/cmd/hooks/wait-paths bin/cmd/tar2ext4 bin/internal/tools/snp-report bin/cmd/dmverity-vhd:
 	@mkdir -p $(dir $@)
 	GOOS=linux $(GO_BUILD) -o $@ $(SRCROOT)/$(@:bin/%=%)

@@ -108,4 +185,4 @@ bin/init: init/init.o vsockexec/vsock.o

 %.o: %.c
 	@mkdir -p $(dir $@)
-	$(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
+	$(CC) $(CFLAGS) $(CPPFLAGS) -c -o $@ $<
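A note on the new dm-verity plumbing above: the `%.hash` rule scrapes `veritysetup format` output into one file per parameter, and `datasectors` converts ext4 data blocks into the 512-byte sectors that device-mapper expects. The sketch below (Go, purely illustrative) reassembles the same `dm-mod.create=` table string from those values; the block counts, device names, and block sizes are assumed example values, not numbers taken from this diff.

```go
// Illustrative reconstruction of the dm-verity table the Makefile builds.
package main

import "fmt"

func main() {
	const (
		dataBlocks    = 25600 // contents of rootfs.hash.datablocks (assumed example)
		dataBlockSize = 4096  // contents of rootfs.hash.datablocksize
		hashBlockSize = 4096  // contents of rootfs.hash.hashblocksize
		rootDigest    = "<rootdigest>"
		salt          = "<salt>"
	)
	// dm-verity sizes the data device in 512-byte sectors, which is exactly
	// the Makefile's: echo $(( datablocks * datablocksize / 512 ))
	dataSectors := dataBlocks * dataBlockSize / 512

	table := fmt.Sprintf("dmverity,,,ro,0 %d verity 1 /dev/sda /dev/sdb %d %d %d 0 sha256 %s %s",
		dataSectors, dataBlockSize, hashBlockSize, dataBlocks, rootDigest, salt)
	fmt.Println(table) // shape of the dm-mod.create= argument in out/kernelinitrd.bin
}
```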
vendor/github.com/Microsoft/hcsshim/internal/oc/errors.go (generated, vendored; 4 lines changed)

@@ -6,7 +6,7 @@ import (
 	"net"
 	"os"

-	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/errdefs"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
@@ -16,7 +16,7 @@ import (

 func toStatusCode(err error) codes.Code {
 	// checks if err implements GRPCStatus() *"google.golang.org/grpc/status".Status,
-	// wraps an error defined in "github.com/containerd/containerd/errdefs", or is a
+	// wraps an error defined in "github.com/containerd/errdefs", or is a
 	// context timeout or cancelled error
 	if s, ok := status.FromError(errdefs.ToGRPC(err)); ok {
 		return s.Code()
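The import moves from the errdefs package bundled inside containerd/containerd to the standalone github.com/containerd/errdefs module (newly vendored below), and the function body is untouched. A minimal sketch of the mapping toStatusCode relies on, assuming errdefs v0.1.0 keeps the same sentinel errors and ToGRPC helper that the unchanged call site above uses:

```go
// Sketch: errdefs sentinel -> gRPC status code, via the standalone module.
package main

import (
	"fmt"

	"github.com/containerd/errdefs"
	"google.golang.org/grpc/status"
)

func main() {
	// Wrap the sentinel anywhere in the chain, as containerd code does.
	err := fmt.Errorf("loading sandbox: %w", errdefs.ErrNotFound)

	// ToGRPC converts the wrapped errdefs error into one carrying a gRPC status.
	if s, ok := status.FromError(errdefs.ToGRPC(err)); ok {
		fmt.Println(s.Code()) // NotFound
	}
}
```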
vendor/github.com/containerd/containerd/NOTICE (generated, vendored; 16 lines removed)

@@ -1,16 +0,0 @@
-Docker
-Copyright 2012-2015 Docker, Inc.
-
-This product includes software developed at Docker, Inc. (https://www.docker.com).
-
-The following is courtesy of our legal counsel:
-
-
-Use and transfer of Docker may be subject to certain restrictions by the
-United States and other governments.
-It is your responsibility to ensure that your use and/or transfer does not
-violate applicable laws.
-
-For more information, please see https://www.bis.doc.gov
-
-See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
vendor/github.com/containerd/errdefs/README.md (generated, vendored; new file, 13 lines)

@@ -0,0 +1,13 @@
+# errdefs
+
+A Go package for defining and checking common containerd errors.
+
+## Project details
+
+**errdefs** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
+As a containerd sub-project, you will find the:
+ * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
+ * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
+ * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
+
+information in our [`containerd/project`](https://github.com/containerd/project) repository.
vendor/github.com/containers/image/v5/copy/compression.go (generated, vendored; 11 lines changed)

@@ -23,9 +23,9 @@ var (
 	// compressionBufferSize is the buffer size used to compress a blob
 	compressionBufferSize = 1048576

-	// expectedCompressionFormats is used to check if a blob with a specified media type is compressed
+	// expectedBaseCompressionFormats is used to check if a blob with a specified media type is compressed
 	// using the algorithm that the media type says it should be compressed with
-	expectedCompressionFormats = map[string]*compressiontypes.Algorithm{
+	expectedBaseCompressionFormats = map[string]*compressiontypes.Algorithm{
 		imgspecv1.MediaTypeImageLayerGzip:      &compression.Gzip,
 		imgspecv1.MediaTypeImageLayerZstd:      &compression.Zstd,
 		manifest.DockerV2Schema2LayerMediaType: &compression.Gzip,
@@ -62,8 +62,8 @@ func blobPipelineDetectCompressionStep(stream *sourceStream, srcInfo types.BlobI
 		res.srcCompressorName = internalblobinfocache.Uncompressed
 	}

-	if expectedFormat, known := expectedCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.Name() != expectedFormat.Name() {
-		logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedFormat.Name(), format.Name())
+	if expectedBaseFormat, known := expectedBaseCompressionFormats[stream.info.MediaType]; known && res.isCompressed && format.BaseVariantName() != expectedBaseFormat.Name() {
+		logrus.Debugf("blob %s with type %s should be compressed with %s, but compressor appears to be %s", srcInfo.Digest.String(), srcInfo.MediaType, expectedBaseFormat.Name(), format.Name())
 	}
 	return res, nil
 }
@@ -172,7 +172,8 @@ func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bp
 // bpcRecompressCompressed checks if we should be recompressing a compressed input to another format, and returns a *bpCompressionStepData if so.
 func (ic *imageCopier) bpcRecompressCompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
 	if ic.c.dest.DesiredLayerCompression() == types.Compress && detected.isCompressed &&
-		ic.compressionFormat != nil && ic.compressionFormat.Name() != detected.format.Name() {
+		ic.compressionFormat != nil &&
+		(ic.compressionFormat.Name() != detected.format.Name() && ic.compressionFormat.Name() != detected.format.BaseVariantName()) {
 		// When the blob is compressed, but the desired format is different, it first needs to be decompressed and finally
 		// re-compressed using the desired format.
 		logrus.Debugf("Blob will be converted")
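The point of comparing BaseVariantName() instead of Name() is that variant algorithms such as zstd:chunked share a media type with their base algorithm, so the old name-for-name check produced spurious mismatches. A self-contained sketch of the idea — the `algo` type below is a stand-in, not the real compressiontypes.Algorithm, whose BaseVariantName() the diff relies on:

```go
// Sketch of the base-variant comparison introduced above.
package main

import "fmt"

type algo struct {
	name        string
	baseVariant string // empty when the algorithm is its own base
}

func (a algo) Name() string { return a.name }

// BaseVariantName returns the base variant's name, falling back to the
// algorithm's own name.
func (a algo) BaseVariantName() string {
	if a.baseVariant != "" {
		return a.baseVariant
	}
	return a.name
}

func main() {
	expectedBase := algo{name: "zstd"}                          // what the zstd media type promises
	detected := algo{name: "zstd:chunked", baseVariant: "zstd"} // what the compressor looks like

	// Old check: names differ, so a spurious "should be compressed with zstd" log.
	fmt.Println("old mismatch:", detected.Name() != expectedBase.Name()) // true
	// New check: the chunked variant shares the zstd base, so no warning.
	fmt.Println("new mismatch:", detected.BaseVariantName() != expectedBase.Name()) // false
}
```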
vendor/github.com/containers/image/v5/copy/digesting_reader.go (generated, vendored; 4 lines changed)

@@ -23,11 +23,11 @@ type digestingReader struct {
 func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
 	var digester digest.Digester
 	if err := expectedDigest.Validate(); err != nil {
-		return nil, fmt.Errorf("Invalid digest specification %s", expectedDigest)
+		return nil, fmt.Errorf("invalid digest specification %q: %w", expectedDigest, err)
 	}
 	digestAlgorithm := expectedDigest.Algorithm()
 	if !digestAlgorithm.Available() {
-		return nil, fmt.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
+		return nil, fmt.Errorf("invalid digest specification %q: unsupported digest algorithm %q", expectedDigest, digestAlgorithm)
 	}
 	digester = digestAlgorithm.Digester()

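Beyond the Go-convention fixes (lower-case error strings, %q quoting), the first message now wraps the Validate() failure with %w, so callers can still reach the underlying cause. A standard-library sketch of why that matters:

```go
// Sketch: %w keeps the wrapped cause reachable through errors.Is/errors.As.
package main

import (
	"errors"
	"fmt"
)

var errInvalidFormat = errors.New("invalid checksum digest format")

// validate is a stand-in for digest.Validate().
func validate(d string) error { return errInvalidFormat }

func main() {
	d := "not-a-digest"
	err := fmt.Errorf("invalid digest specification %q: %w", d, validate(d))

	// With %w the cause is preserved; with %s or %v it would be flattened away.
	fmt.Println(errors.Is(err, errInvalidFormat)) // true
}
```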
vendor/github.com/containers/image/v5/copy/multiple.go (generated, vendored; 3 lines changed)

@@ -38,6 +38,7 @@ type instanceCopy struct {

 	// Fields which can be used by callers when operation
 	// is `instanceCopyClone`
+	cloneArtifactType       string
 	cloneCompressionVariant OptionCompressionVariant
 	clonePlatform           *imgspecv1.Platform
 	cloneAnnotations        map[string]string
@@ -142,6 +143,7 @@ func prepareInstanceCopies(list internalManifest.List, instanceDigests []digest.
 			res = append(res, instanceCopy{
 				op:                      instanceCopyClone,
 				sourceDigest:            instanceDigest,
+				cloneArtifactType:       instanceDetails.ReadOnly.ArtifactType,
 				cloneCompressionVariant: compressionVariant,
 				clonePlatform:           instanceDetails.ReadOnly.Platform,
 				cloneAnnotations:        maps.Clone(instanceDetails.ReadOnly.Annotations),
@@ -268,6 +270,7 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte,
 				AddDigest:                updated.manifestDigest,
 				AddSize:                  int64(len(updated.manifest)),
 				AddMediaType:             updated.manifestMIMEType,
+				AddArtifactType:          instance.cloneArtifactType,
 				AddPlatform:              instance.clonePlatform,
 				AddAnnotations:           instance.cloneAnnotations,
 				AddCompressionAlgorithms: updated.compressionAlgorithms,
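The new cloneArtifactType field rides alongside cloneAnnotations, which — as the unchanged context shows — is populated via maps.Clone so each planned instance copy owns its own annotations map. A short sketch of the aliasing hazard that clone avoids:

```go
// Sketch of why cloneAnnotations uses maps.Clone: without it, every planned
// instance copy would share (and could mutate) a single annotations map.
package main

import (
	"fmt"
	"maps" // standard library since Go 1.21
)

func main() {
	readOnly := map[string]string{"org.example.key": "original"}

	shared := readOnly            // alias: writes through either name hit both
	owned := maps.Clone(readOnly) // independent copy, as in cloneAnnotations

	readOnly["org.example.key"] = "mutated"
	fmt.Println(shared["org.example.key"]) // "mutated"
	fmt.Println(owned["org.example.key"])  // "original"
}
```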
vendor/github.com/containers/image/v5/copy/progress_bars.go (generated, vendored; 4 lines changed)

@@ -4,6 +4,7 @@ import (
 	"context"
 	"fmt"
 	"io"
+	"time"

 	"github.com/containers/image/v5/internal/private"
 	"github.com/containers/image/v5/types"
@@ -148,13 +149,14 @@ type blobChunkAccessorProxy struct {
 // The readers must be fully consumed, in the order they are returned, before blocking
 // to read the next chunk.
 func (s *blobChunkAccessorProxy) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
+	start := time.Now()
 	rc, errs, err := s.wrapped.GetBlobAt(ctx, info, chunks)
 	if err == nil {
 		total := int64(0)
 		for _, c := range chunks {
 			total += int64(c.Length)
 		}
-		s.bar.IncrInt64(total)
+		s.bar.EwmaIncrInt64(total, time.Since(start))
	}
 	return rc, errs, err
 }
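Switching from IncrInt64 to EwmaIncrInt64 and timing the wrapped call gives the progress bar a (bytes, duration) pair per increment, which is what EWMA-based speed/ETA decorators need. A stand-alone sketch of what such a decorator does with those pairs; the 0.5 smoothing factor is an arbitrary illustrative choice, not a value from the mpb library:

```go
// Sketch: exponentially weighted moving average of transfer rate.
package main

import (
	"fmt"
	"time"
)

func main() {
	const alpha = 0.5
	ewma := 0.0 // smoothed bytes/second

	samples := []struct {
		bytes int64
		dur   time.Duration
	}{
		{1 << 20, 100 * time.Millisecond},
		{1 << 20, 400 * time.Millisecond}, // a slow chunk drags the estimate down quickly
		{1 << 20, 120 * time.Millisecond},
	}
	for _, s := range samples {
		rate := float64(s.bytes) / s.dur.Seconds()
		if ewma == 0 {
			ewma = rate
		} else {
			ewma = alpha*rate + (1-alpha)*ewma
		}
		fmt.Printf("instant %.0f B/s, smoothed %.0f B/s\n", rate, ewma)
	}
	// A plain IncrInt64 carries no duration, so the bar could only average
	// over wall-clock time since start — hence the time.Now()/time.Since pair.
}
```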
vendor/github.com/containers/image/v5/copy/single.go (generated, vendored; 52 lines changed)

@@ -383,7 +383,11 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context,

 	compressionAlgos := set.New[string]()
 	for _, srcInfo := range ic.src.LayerInfos() {
-		if _, c := compressionEditsFromMIMEType(srcInfo); c != nil {
+		_, c, err := compressionEditsFromBlobInfo(srcInfo)
+		if err != nil {
+			return nil, err
+		}
+		if c != nil {
 			compressionAlgos.Add(c.Name())
 		}
 	}
@ -636,21 +640,28 @@ type diffIDResult struct {
|
||||
err error
|
||||
}
|
||||
|
||||
// compressionEditsFromMIMEType returns a (CompressionOperation, CompressionAlgorithm) value pair suitable
|
||||
// for types.BlobInfo, based on a MIME type of srcInfo.
|
||||
func compressionEditsFromMIMEType(srcInfo types.BlobInfo) (types.LayerCompression, *compressiontypes.Algorithm) {
|
||||
// compressionEditsFromBlobInfo returns a (CompressionOperation, CompressionAlgorithm) value pair suitable
|
||||
// for types.BlobInfo.
|
||||
func compressionEditsFromBlobInfo(srcInfo types.BlobInfo) (types.LayerCompression, *compressiontypes.Algorithm, error) {
|
||||
// This MIME type → compression mapping belongs in manifest-specific code in our manifest
|
||||
// package (but we should preferably replace/change UpdatedImage instead of productizing
|
||||
// this workaround).
|
||||
switch srcInfo.MediaType {
|
||||
case manifest.DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayerGzip:
|
||||
return types.PreserveOriginal, &compression.Gzip
|
||||
return types.PreserveOriginal, &compression.Gzip, nil
|
||||
case imgspecv1.MediaTypeImageLayerZstd:
|
||||
return types.PreserveOriginal, &compression.Zstd
|
||||
tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
|
||||
if err != nil {
|
||||
return types.PreserveOriginal, nil, err
|
||||
}
|
||||
if tocDigest != nil {
|
||||
return types.PreserveOriginal, &compression.ZstdChunked, nil
|
||||
}
|
||||
return types.PreserveOriginal, &compression.Zstd, nil
|
||||
case manifest.DockerV2SchemaLayerMediaTypeUncompressed, imgspecv1.MediaTypeImageLayer:
|
||||
return types.Decompress, nil
|
||||
return types.Decompress, nil, nil
|
||||
default:
|
||||
return types.PreserveOriginal, nil
|
||||
return types.PreserveOriginal, nil, nil
|
||||
}
|
||||
}
|
||||
|
||||
@ -666,7 +677,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
|
||||
// (Sadly UpdatedImage() is documented to not update MediaTypes from
|
||||
// ManifestUpdateOptions.LayerInfos[].MediaType, so we are doing it indirectly.)
|
||||
if srcInfo.CompressionOperation == types.PreserveOriginal && srcInfo.CompressionAlgorithm == nil {
|
||||
srcInfo.CompressionOperation, srcInfo.CompressionAlgorithm = compressionEditsFromMIMEType(srcInfo)
|
||||
op, algo, err := compressionEditsFromBlobInfo(srcInfo)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", err
|
||||
}
|
||||
srcInfo.CompressionOperation = op
|
||||
srcInfo.CompressionAlgorithm = algo
|
||||
}
|
||||
|
||||
ic.c.printCopyInfo("blob", srcInfo)
|
||||
@ -695,11 +711,16 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
|
||||
requiredCompression = ic.compressionFormat
|
||||
}
|
||||
|
||||
var tocDigest digest.Digest
|
||||
|
||||
// Check if we have a chunked layer in storage that's based on that blob. These layers are stored by their TOC digest.
|
||||
tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
|
||||
d, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, "", err
|
||||
}
|
||||
if d != nil {
|
||||
tocDigest = *d
|
||||
}
|
||||
|
||||
reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
|
||||
Cache: ic.c.blobInfoCache,
|
||||
@ -718,7 +739,11 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
|
||||
if reused {
|
||||
logrus.Debugf("Skipping blob %s (already present):", srcInfo.Digest)
|
||||
func() { // A scope for defer
|
||||
bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", "skipped: already exists")
|
||||
label := "skipped: already exists"
|
||||
if reusedBlob.MatchedByTOCDigest {
|
||||
label = "skipped: already exists (found by TOC)"
|
||||
}
|
||||
bar := ic.c.createProgressBar(pool, false, types.BlobInfo{Digest: reusedBlob.Digest, Size: 0}, "blob", label)
|
||||
defer bar.Abort(false)
|
||||
bar.mark100PercentComplete()
|
||||
}()
|
||||
@ -751,7 +776,10 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
|
||||
wrapped: ic.c.rawSource,
|
||||
bar: bar,
|
||||
}
|
||||
uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, ic.c.blobInfoCache)
|
||||
uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, private.PutBlobPartialOptions{
|
||||
Cache: ic.c.blobInfoCache,
|
||||
LayerIndex: layerIndex,
|
||||
})
|
||||
if err == nil {
|
||||
if srcInfo.Size != -1 {
|
||||
refill := srcInfo.Size - bar.Current()
|
||||
|
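The single.go changes are the heart of this update: a zstd layer whose annotations carry a TOC (table-of-contents) digest is now reported as compression.ZstdChunked rather than plain Zstd, and that TOC digest is passed to TryReusingBlobWithOptions so the destination can match partially-pulled layers. A standalone sketch of the annotation lookup; the annotation key below is an assumption written out for illustration, while the vendored code goes through chunkedToc.GetTOCDigest instead:

package main

import "fmt"

// tocAnnotation is assumed here for illustration; the real key is owned by
// containers/storage's pkg/chunked/toc and read via chunkedToc.GetTOCDigest.
const tocAnnotation = "io.github.containers.zstd-chunked.manifest-checksum"

// classify reports whether a zstd layer should be treated as zstd:chunked.
func classify(annotations map[string]string) string {
	if _, ok := annotations[tocAnnotation]; ok {
		return "zstd:chunked" // TOC present: random access into the layer is possible
	}
	return "zstd"
}

func main() {
	fmt.Println(classify(map[string]string{tocAnnotation: "sha256:abc"}))
	fmt.Println(classify(nil))
}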
35
vendor/github.com/containers/image/v5/docker/docker_client.go
generated
vendored
35
vendor/github.com/containers/image/v5/docker/docker_client.go
generated
vendored
@ -978,13 +978,10 @@ func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, t
// This function can return nil reader when no url is supported by this function. In this case, the caller
// should fallback to fetch the non-external blob (i.e. pull from the registry).
func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) {
var (
resp *http.Response
err error
)
if len(urls) == 0 {
return nil, 0, errors.New("internal error: getExternalBlob called with no URLs")
}
var remoteErrors []error
for _, u := range urls {
blobURL, err := url.Parse(u)
if err != nil || (blobURL.Scheme != "http" && blobURL.Scheme != "https") {
@ -993,24 +990,28 @@ func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.R
// NOTE: we must not authenticate on additional URLs as those
// can be abused to leak credentials or tokens. Please
// refer to CVE-2020-15157 for more information.
resp, err = c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil)
if err == nil {
if resp.StatusCode != http.StatusOK {
err = fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode))
logrus.Debug(err)
resp.Body.Close()
continue
}
break
resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil)
if err != nil {
remoteErrors = append(remoteErrors, err)
continue
}
if resp.StatusCode != http.StatusOK {
err := fmt.Errorf("error fetching external blob from %q: %d (%s)", u, resp.StatusCode, http.StatusText(resp.StatusCode))
remoteErrors = append(remoteErrors, err)
logrus.Debug(err)
resp.Body.Close()
continue
}
return resp.Body, getBlobSize(resp), nil
}
if resp == nil && err == nil {
if remoteErrors == nil {
return nil, 0, nil // fallback to non-external blob
}
if err != nil {
return nil, 0, err
err := fmt.Errorf("failed fetching external blob from all urls: %w", remoteErrors[0])
for _, e := range remoteErrors[1:] {
err = fmt.Errorf("%s, %w", err, e)
}
return resp.Body, getBlobSize(resp), nil
return nil, 0, err
}

func getBlobSize(resp *http.Response) int64 {
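getExternalBlob now records one error per failed URL and only reports after every mirror has been tried, chaining the causes with %w. Since Go 1.20 the same aggregation can be written with errors.Join; a sketch of the approach under an illustrative fetchOne:

package main

import (
	"errors"
	"fmt"
)

func fetchOne(u string) error { return fmt.Errorf("fetching %q: connection refused", u) }

func fetchAny(urls []string) error {
	var remoteErrors []error
	for _, u := range urls {
		if err := fetchOne(u); err != nil {
			remoteErrors = append(remoteErrors, err)
			continue
		}
		return nil // first success wins
	}
	if remoteErrors == nil {
		return nil // nothing to try; caller falls back to the registry
	}
	// errors.Join (Go 1.20+) keeps every cause visitable via errors.Is/As,
	// much like the fmt.Errorf("%s, %w", ...) chain in the diff above.
	return fmt.Errorf("failed fetching external blob from all urls: %w", errors.Join(remoteErrors...))
}

func main() {
	fmt.Println(fetchAny([]string{"http://a.example", "http://b.example"}))
}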
3
vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go
generated
vendored
3
vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go
generated
vendored
@ -17,7 +17,8 @@ func CandidateMatchesTryReusingBlobOptions(options private.TryReusingBlobOptions
// The caller must re-compress to build those annotations.
return false
}
if candidateCompression == nil || (options.RequiredCompression.Name() != candidateCompression.Name()) {
if candidateCompression == nil ||
(options.RequiredCompression.Name() != candidateCompression.Name() && options.RequiredCompression.Name() != candidateCompression.BaseVariantName()) {
return false
}
}

vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go
generated
vendored
@ -4,7 +4,6 @@ import (
"context"
"fmt"

"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/private"
"github.com/containers/image/v5/types"
)
@ -39,7 +38,7 @@ func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool {
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
}
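The helpers.go condition is what makes zstd:chunked blobs reusable when a caller merely requires zstd: a candidate now matches if the required name equals either the candidate's own name or its base variant. A standalone sketch of that relation (the algorithm type here is a simplified stand-in for the internal one):

package main

import "fmt"

type algorithm struct {
	name        string
	baseVariant string // equal to name unless this is a derived variant
}

func (a algorithm) Name() string            { return a.name }
func (a algorithm) BaseVariantName() string { return a.baseVariant }

// matches mirrors the reuse check: exact name, or the candidate's base variant.
func matches(required string, candidate algorithm) bool {
	return required == candidate.Name() || required == candidate.BaseVariantName()
}

func main() {
	zstdChunked := algorithm{name: "zstd:chunked", baseVariant: "zstd"}
	fmt.Println(matches("zstd", zstdChunked))         // true: chunked data is valid zstd
	fmt.Println(matches("zstd:chunked", zstdChunked)) // true: exact match
	fmt.Println(matches("gzip", zstdChunked))         // false
}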
2
vendor/github.com/containers/image/v5/internal/manifest/list.go
generated
vendored
2
vendor/github.com/containers/image/v5/internal/manifest/list.go
generated
vendored
@ -73,6 +73,7 @@ type ListUpdate struct {
Platform *imgspecv1.Platform
Annotations map[string]string
CompressionAlgorithmNames []string
ArtifactType string
}
}

@ -101,6 +102,7 @@ type ListEdit struct {
AddDigest digest.Digest
AddSize int64
AddMediaType string
AddArtifactType string
AddPlatform *imgspecv1.Platform
AddAnnotations map[string]string
AddCompressionAlgorithms []compression.Algorithm
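ListUpdate and ListEdit grow matching ArtifactType fields so the OCI artifactType read from a source index instance survives a clone edit. A sketch of the round-trip under these simplified stand-in types (illustrative, not the internal API):

package main

import "fmt"

type listUpdate struct{ ArtifactType string }  // what Instance() reports
type listEdit struct{ AddArtifactType string } // what EditInstances consumes

func cloneEdit(src listUpdate) listEdit {
	// The new fields let the artifact type flow from the read side to the write side.
	return listEdit{AddArtifactType: src.ArtifactType}
}

func main() {
	fmt.Println(cloneEdit(listUpdate{ArtifactType: "application/vnd.example.config.v1+json"}))
}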
7
vendor/github.com/containers/image/v5/internal/manifest/manifest.go
generated
vendored
7
vendor/github.com/containers/image/v5/internal/manifest/manifest.go
generated
vendored
@ -169,7 +169,8 @@ func NormalizedMIMEType(input string) string {

// CompressionAlgorithmIsUniversallySupported returns true if MIMETypeSupportsCompressionAlgorithm(mimeType, algo) returns true for all mimeType values.
func CompressionAlgorithmIsUniversallySupported(algo compressiontypes.Algorithm) bool {
switch algo.Name() { // Should this use InternalUnstableUndocumentedMIMEQuestionMark() ?
// Compare the discussion about BaseVariantName in MIMETypeSupportsCompressionAlgorithm().
switch algo.Name() {
case compressiontypes.GzipAlgorithmName:
return true
default:
@ -182,7 +183,9 @@ func MIMETypeSupportsCompressionAlgorithm(mimeType string, algo compressiontypes
if CompressionAlgorithmIsUniversallySupported(algo) {
return true
}
switch algo.Name() { // Should this use InternalUnstableUndocumentedMIMEQuestionMark() ?
// This does not use BaseVariantName: Plausibly a manifest format might support zstd but not have annotation fields.
// The logic might have to be more complex (and more ad-hoc) if more manifest formats, with more capabilities, emerge.
switch algo.Name() {
case compressiontypes.ZstdAlgorithmName, compressiontypes.ZstdChunkedAlgorithmName:
return mimeType == imgspecv1.MediaTypeImageManifest
default: // Includes Bzip2AlgorithmName and XzAlgorithmName, which are defined names but are not supported anywhere
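These helpers encode a capability matrix: gzip is accepted by every supported manifest format, while zstd (and zstd:chunked, which additionally needs annotation fields) is only representable in OCI image manifests. A sketch of the same decision table, with MIME constants written out literally for self-containment:

package main

import "fmt"

const ociManifest = "application/vnd.oci.image.manifest.v1+json"
const dockerSchema2 = "application/vnd.docker.distribution.manifest.v2+json"

// supports reports whether a manifest MIME type can describe layers using algo.
func supports(mimeType, algo string) bool {
	switch algo {
	case "gzip":
		return true // universally supported
	case "zstd", "zstd:chunked":
		return mimeType == ociManifest // needs OCI media types (and annotations for chunked)
	default: // bzip2, xz, ... are defined names but not supported anywhere
		return false
	}
}

func main() {
	fmt.Println(supports(ociManifest, "zstd"))   // true
	fmt.Println(supports(dockerSchema2, "zstd")) // false
	fmt.Println(supports(dockerSchema2, "gzip")) // true
}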
28
vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
generated
vendored
28
vendor/github.com/containers/image/v5/internal/manifest/oci_index.go
generated
vendored
@ -61,6 +61,7 @@ func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate
ret.ReadOnly.Platform = manifest.Platform
ret.ReadOnly.Annotations = manifest.Annotations
ret.ReadOnly.CompressionAlgorithmNames = annotationsToCompressionAlgorithmNames(manifest.Annotations)
ret.ReadOnly.ArtifactType = manifest.ArtifactType
return ret, nil
}
}
@ -102,7 +103,7 @@ func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, an
*annotationsMap = map[string]string{}
}
for _, algo := range compressionAlgorithms {
switch algo.Name() {
switch algo.BaseVariantName() {
case compression.ZstdAlgorithmName:
(*annotationsMap)[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue
default:
@ -157,11 +158,13 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error {
}
addCompressionAnnotations(editInstance.AddCompressionAlgorithms, &annotations)
addedEntries = append(addedEntries, imgspecv1.Descriptor{
MediaType: editInstance.AddMediaType,
Size: editInstance.AddSize,
Digest: editInstance.AddDigest,
Platform: editInstance.AddPlatform,
Annotations: annotations})
MediaType: editInstance.AddMediaType,
ArtifactType: editInstance.AddArtifactType,
Size: editInstance.AddSize,
Digest: editInstance.AddDigest,
Platform: editInstance.AddPlatform,
Annotations: annotations,
})
default:
return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation)
}
@ -299,12 +302,13 @@ func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotation
platform = &platformCopy
}
m := imgspecv1.Descriptor{
MediaType: component.MediaType,
Size: component.Size,
Digest: component.Digest,
URLs: slices.Clone(component.URLs),
Annotations: maps.Clone(component.Annotations),
Platform: platform,
MediaType: component.MediaType,
ArtifactType: component.ArtifactType,
Size: component.Size,
Digest: component.Digest,
URLs: slices.Clone(component.URLs),
Annotations: maps.Clone(component.Annotations),
Platform: platform,
}
index.Manifests[i] = m
}
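Because addCompressionAnnotations now switches on BaseVariantName(), a zstd:chunked instance gets the same index-level zstd annotation as a plain zstd one, which is what lets zstd-aware consumers pick that instance. A sketch of the effect; the annotation key/value pair below is written out literally for illustration, the real constants live in the vendored package:

package main

import "fmt"

// addZstdAnnotation mirrors the switch in addCompressionAnnotations:
// keying on the base variant folds "zstd:chunked" into the "zstd" case.
func addZstdAnnotation(baseVariant string, annotations map[string]string) {
	if baseVariant == "zstd" {
		annotations["io.github.containers.compression.zstd"] = "true"
	}
}

func main() {
	forChunked := map[string]string{}
	addZstdAnnotation("zstd", forChunked) // zstd:chunked reports base variant "zstd"
	fmt.Println(forChunked)
}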
12
vendor/github.com/containers/image/v5/internal/private/private.go
generated
vendored
12
vendor/github.com/containers/image/v5/internal/private/private.go
generated
vendored
@ -55,7 +55,7 @@ type ImageDestinationInternalOnly interface {
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (UploadedBlob, error)
PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, options PutBlobPartialOptions) (UploadedBlob, error)

// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
@ -100,6 +100,12 @@ type PutBlobOptions struct {
LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
}

// PutBlobPartialOptions are used in PutBlobPartial.
type PutBlobPartialOptions struct {
Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update.
LayerIndex int // A zero-based index of the layer within the image (PutBlobPartial is only called with layer-like blobs, not configs)
}

// TryReusingBlobOptions are used in TryReusingBlobWithOptions.
type TryReusingBlobOptions struct {
Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update.
@ -118,7 +124,7 @@ type TryReusingBlobOptions struct {
PossibleManifestFormats []string // A set of possible manifest formats; at least one should support the reused layer blob.
RequiredCompression *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go
OriginalCompression *compression.Algorithm // May be nil to indicate “uncompressed” or “unknown”.
TOCDigest *digest.Digest // If specified, the blob can be looked up in the destination also by its TOC digest.
TOCDigest digest.Digest // If specified, the blob can be looked up in the destination also by its TOC digest.
}

// ReusedBlob is information about a blob reused in a destination.
@ -130,6 +136,8 @@ type ReusedBlob struct {
// a differently-compressed blob.
CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A
CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A

MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes.
}

// ImageSourceChunk is a portion of a blob.
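PutBlobPartial switches from a bare cache parameter to a PutBlobPartialOptions struct, the same extensible-options pattern already used by PutBlobOptions and TryReusingBlobOptions: new inputs (here LayerIndex) can be added later without touching every transport's signature again. A sketch of the pattern under assumed simplified types:

package main

import "fmt"

type cache struct{} // stand-in for blobinfocache.BlobInfoCache2

// putBlobPartialOptions mirrors the shape of the new options struct.
type putBlobPartialOptions struct {
	Cache      *cache // cache to use and/or update
	LayerIndex int    // zero-based layer index; partial puts are always layers
}

// putBlobPartial takes one options value instead of a growing parameter list.
func putBlobPartial(opts putBlobPartialOptions) {
	fmt.Printf("partial put of layer %d (cache=%v)\n", opts.LayerIndex, opts.Cache != nil)
}

func main() {
	putBlobPartial(putBlobPartialOptions{Cache: &cache{}, LayerIndex: 3})
}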
2
vendor/github.com/containers/image/v5/manifest/common.go
generated
vendored
2
vendor/github.com/containers/image/v5/manifest/common.go
generated
vendored
@ -55,7 +55,7 @@ func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType
if variants != nil {
name := mtsUncompressed
if algorithm != nil {
name = algorithm.InternalUnstableUndocumentedMIMEQuestionMark()
name = algorithm.BaseVariantName()
}
if res, ok := variants[name]; ok {
if res != mtsUnsupportedMIMEType {
36
vendor/github.com/containers/image/v5/manifest/oci.go
generated
vendored
36
vendor/github.com/containers/image/v5/manifest/oci.go
generated
vendored
@ -9,7 +9,6 @@ import (
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
ociencspec "github.com/containers/ocicrypt/spec"
chunkedToc "github.com/containers/storage/pkg/chunked/toc"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@ -260,44 +259,9 @@ func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) {
if err := m.Config.Digest.Validate(); err != nil {
return "", err
}

// If there is any layer that is using partial content, we calculate the image ID
// in a different way since the diffID cannot be validated as for regular pulled images.
for _, layer := range m.Layers {
toc, err := chunkedToc.GetTOCDigest(layer.Annotations)
if err != nil {
return "", fmt.Errorf("error looking up annotation for layer %q: %w", layer.Digest, err)
}
if toc != nil {
return m.calculateImageIDForPartialImage(diffIDs)
}
}

return m.Config.Digest.Hex(), nil
}

func (m *OCI1) calculateImageIDForPartialImage(diffIDs []digest.Digest) (string, error) {
newID := digest.Canonical.Digester()
for i, layer := range m.Layers {
diffID := diffIDs[i]
_, err := newID.Hash().Write([]byte(diffID.Hex()))
if err != nil {
return "", fmt.Errorf("error writing diffID %q: %w", diffID, err)
}
toc, err := chunkedToc.GetTOCDigest(layer.Annotations)
if err != nil {
return "", fmt.Errorf("error looking up annotation for layer %q: %w", layer.Digest, err)
}
if toc != nil {
_, err = newID.Hash().Write([]byte(toc.Hex()))
if err != nil {
return "", fmt.Errorf("error writing TOC %q: %w", toc, err)
}
}
}
return newID.Digest().Hex(), nil
}

// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
// (and the code can handle that).
// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
5
vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
generated
vendored
5
vendor/github.com/containers/image/v5/oci/archive/oci_dest.go
generated
vendored
@ -6,7 +6,6 @@ import (
"io"
"os"

"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/imagedestination"
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/private"
@ -120,8 +119,8 @@ func (d *ociArchiveImageDestination) PutBlobWithOptions(ctx context.Context, str
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
func (d *ociArchiveImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
return d.unpackedDest.PutBlobPartial(ctx, chunkAccessor, srcInfo, options)
}

// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
5
vendor/github.com/containers/image/v5/openshift/openshift_dest.go
generated
vendored
5
vendor/github.com/containers/image/v5/openshift/openshift_dest.go
generated
vendored
@ -12,7 +12,6 @@ import (

"github.com/containers/image/v5/docker"
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/imagedestination"
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/imagedestination/stubs"
@ -128,8 +127,8 @@ func (d *openshiftImageDestination) PutBlobWithOptions(ctx context.Context, stre
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, cache)
func (d *openshiftImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
return d.docker.PutBlobPartial(ctx, chunkAccessor, srcInfo, options)
}

// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
12
vendor/github.com/containers/image/v5/pkg/compression/compression.go
generated
vendored
12
vendor/github.com/containers/image/v5/pkg/compression/compression.go
generated
vendored
@ -19,19 +19,19 @@ type Algorithm = types.Algorithm

var (
// Gzip compression.
Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, types.GzipAlgorithmName,
Gzip = internal.NewAlgorithm(types.GzipAlgorithmName, "",
[]byte{0x1F, 0x8B, 0x08}, GzipDecompressor, gzipCompressor)
// Bzip2 compression.
Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, types.Bzip2AlgorithmName,
Bzip2 = internal.NewAlgorithm(types.Bzip2AlgorithmName, "",
[]byte{0x42, 0x5A, 0x68}, Bzip2Decompressor, bzip2Compressor)
// Xz compression.
Xz = internal.NewAlgorithm(types.XzAlgorithmName, types.XzAlgorithmName,
Xz = internal.NewAlgorithm(types.XzAlgorithmName, "",
[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor, xzCompressor)
// Zstd compression.
Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, types.ZstdAlgorithmName,
Zstd = internal.NewAlgorithm(types.ZstdAlgorithmName, "",
[]byte{0x28, 0xb5, 0x2f, 0xfd}, ZstdDecompressor, zstdCompressor)
// ZstdChunked is a Zstd compression with chunk metadta which allows random access to individual files.
ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName, /* Note: InternalUnstableUndocumentedMIMEQuestionMark is not ZstdChunkedAlgorithmName */
// ZstdChunked is a Zstd compression with chunk metadata which allows random access to individual files.
ZstdChunked = internal.NewAlgorithm(types.ZstdChunkedAlgorithmName, types.ZstdAlgorithmName,
nil, ZstdDecompressor, compressor.ZstdCompressor)

compressionAlgorithms = map[string]Algorithm{
36
vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
generated
vendored
36
vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
generated
vendored
@ -12,23 +12,28 @@ type DecompressorFunc func(io.Reader) (io.ReadCloser, error)

// Algorithm is a compression algorithm that can be used for CompressStream.
type Algorithm struct {
name string
mime string
prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection.
decompressor DecompressorFunc
compressor CompressorFunc
name string
baseVariantName string
prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection.
decompressor DecompressorFunc
compressor CompressorFunc
}

// NewAlgorithm creates an Algorithm instance.
// nontrivialBaseVariantName is typically "".
// This function exists so that Algorithm instances can only be created by code that
// is allowed to import this internal subpackage.
func NewAlgorithm(name, mime string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
func NewAlgorithm(name, nontrivialBaseVariantName string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
baseVariantName := name
if nontrivialBaseVariantName != "" {
baseVariantName = nontrivialBaseVariantName
}
return Algorithm{
name: name,
mime: mime,
prefix: prefix,
decompressor: decompressor,
compressor: compressor,
name: name,
baseVariantName: baseVariantName,
prefix: prefix,
decompressor: decompressor,
compressor: compressor,
}
}

@ -37,10 +42,11 @@ func (c Algorithm) Name() string {
return c.name
}

// InternalUnstableUndocumentedMIMEQuestionMark ???
// DO NOT USE THIS anywhere outside of c/image until it is properly documented.
func (c Algorithm) InternalUnstableUndocumentedMIMEQuestionMark() string {
return c.mime
// BaseVariantName returns the name of the “base variant” of the compression algorithm.
// It is either equal to Name() of the same algorithm, or equal to Name() of some other Algorithm (the “base variant”).
// This supports a single level of “is-a” relationship between compression algorithms, e.g. where "zstd:chunked" data is valid "zstd" data.
func (c Algorithm) BaseVariantName() string {
return c.baseVariantName
}

// AlgorithmCompressor returns the compressor field of algo.
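The internal Algorithm type replaces its vaguely-named mime field with baseVariantName, defaulting to the algorithm's own name so that only derived variants (zstd:chunked, whose base is zstd) need to state a base. A standalone sketch of the constructor logic:

package main

import "fmt"

type algorithm struct {
	name            string
	baseVariantName string
}

// newAlgorithm mirrors NewAlgorithm: an empty nontrivialBaseVariantName means
// "this algorithm is its own base variant".
func newAlgorithm(name, nontrivialBaseVariantName string) algorithm {
	baseVariantName := name
	if nontrivialBaseVariantName != "" {
		baseVariantName = nontrivialBaseVariantName
	}
	return algorithm{name: name, baseVariantName: baseVariantName}
}

func main() {
	fmt.Println(newAlgorithm("gzip", ""))             // {gzip gzip}
	fmt.Println(newAlgorithm("zstd:chunked", "zstd")) // {zstd:chunked zstd}
}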
8
vendor/github.com/containers/image/v5/signature/sigstore/fulcio/fulcio.go
generated
vendored
8
vendor/github.com/containers/image/v5/signature/sigstore/fulcio/fulcio.go
generated
vendored
@ -142,9 +142,13 @@ func WithFulcioAndInteractiveOIDC(fulcioURL *url.URL, oidcIssuerURL *url.URL, oi
}

logrus.Debugf("Starting interactive OIDC authentication for issuer %s", oidcIssuerURL.Redacted())
// This is intended to match oauthflow.DefaultIDTokenGetter, overriding only input/output
// This is intended to match oauthflow.DefaultIDTokenGetter (incl. the update in init()), overriding only input/output
htmlPage, err := oauth.GetInteractiveSuccessHTML(false, 10)
if err != nil {
return fmt.Errorf("formatting HTML content: %w", err)
}
tokenGetter := &oauthflow.InteractiveIDTokenGetter{
HTMLPage: oauth.InteractiveSuccessHTML,
HTMLPage: htmlPage,
Input: interactiveInput,
Output: interactiveOutput,
}
634
vendor/github.com/containers/image/v5/storage/storage_dest.go
generated
vendored
634
vendor/github.com/containers/image/v5/storage/storage_dest.go
generated
vendored
@ -16,7 +16,6 @@ import (
"sync/atomic"

"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/blobinfocache"
"github.com/containers/image/v5/internal/imagedestination/impl"
"github.com/containers/image/v5/internal/imagedestination/stubs"
"github.com/containers/image/v5/internal/private"
@ -55,41 +54,61 @@ type storageImageDestination struct {
stubs.ImplementsPutBlobPartial
stubs.AlwaysSupportsSignatures

imageRef storageReference
directory string // Temporary directory where we store blobs until Commit() time
nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs
manifest []byte // Manifest contents, temporary
manifestDigest digest.Digest // Valid if len(manifest) != 0
signatures []byte // Signature contents, temporary
signatureses map[digest.Digest][]byte // Instance signature contents, temporary
SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
imageRef storageReference
directory string // Temporary directory where we store blobs until Commit() time
nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs
manifest []byte // Manifest contents, temporary
manifestDigest digest.Digest // Valid if len(manifest) != 0
untrustedDiffIDValues []digest.Digest // From config’s RootFS.DiffIDs, valid if not nil
signatures []byte // Signature contents, temporary
signatureses map[digest.Digest][]byte // Instance signature contents, temporary
metadata storageImageMetadata // Metadata contents being built

// A storage destination may be used concurrently. Accesses are
// serialized via a mutex. Please refer to the individual comments
// below for details.
lock sync.Mutex
// Mapping from layer (by index) to the associated ID in the storage.
// It's protected *implicitly* since `commitLayer()`, at any given
// time, can only be executed by *one* goroutine. Please refer to
// `queueOrCommit()` for further details on how the single-caller
// guarantee is implemented.
indexToStorageID map[int]*string
// All accesses to below data are protected by `lock` which is made
// *explicit* in the code.
uncompressedOrTocDigest map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs or TOC IDs.
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image
blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
indexToStorageID map[int]string

// A storage destination may be used concurrently, due to HasThreadSafePutBlob.
lock sync.Mutex // Protects lockProtected
lockProtected storageImageDestinationLockProtected
}

// storageImageDestinationLockProtected contains storageImageDestination data which might be
// accessed concurrently, due to HasThreadSafePutBlob.
// _During the concurrent TryReusingBlob/PutBlob/* calls_ (but not necessarily during the final Commit)
// uses must hold storageImageDestination.lock.
type storageImageDestinationLockProtected struct {
currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image

// In general, a layer is identified either by (compressed) digest, or by TOC digest.
// When creating a layer, the c/storage layer metadata and image IDs must _only_ be based on trusted values
// we have computed ourselves. (Layer reuse can then look up against such trusted values, but it might not
// recompute those values for incomding layers — the point of the reuse is that we don’t need to consume the incoming layer.)

// Layer identification: For a layer, at least one of indexToTOCDigest and blobDiffIDs must be available before commitLayer is called.
// The presence of an indexToTOCDigest is what decides how the layer is identified, i.e. which fields must be trusted.
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
indexToTOCDigest map[int]digest.Digest // Mapping from layer index to a TOC Digest, IFF the layer was created/found/reused by TOC digest

// Layer data: Before commitLayer is called, either at least one of (diffOutputs, blobAdditionalLayer, filenames)
// should be available; or indexToTOCDigest/blobDiffIDs should be enough to locate an existing c/storage layer.
// They are looked up in the order they are mentioned above.
diffOutputs map[int]*graphdriver.DriverWithDifferOutput // Mapping from layer index to a partially-pulled layer intermediate data
blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
// Mapping from layer blobsums to names of files we used to hold them. If set, fileSizes and blobDiffIDs must also be set.
filenames map[digest.Digest]string
// Mapping from layer blobsums to their sizes. If set, filenames and blobDiffIDs must also be set.
fileSizes map[digest.Digest]int64
}

// addedLayerInfo records data about a layer to use in this image.
type addedLayerInfo struct {
digest digest.Digest
emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
digest digest.Digest // Mandatory, the digest of the layer.
emptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept.
}

// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until
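The storage destination refactor gathers every field that concurrent PutBlob/TryReusingBlob goroutines may touch into one storageImageDestinationLockProtected struct guarded by a single mutex, making the locking discipline visible in the type rather than in scattered comments. A minimal sketch of the pattern (field names illustrative):

package main

import (
	"fmt"
	"sync"
)

type destLockProtected struct {
	blobDiffIDs      map[string]string // digest -> diffID
	indexToTOCDigest map[int]string    // layer index -> TOC digest
}

type dest struct {
	directory string // not touched concurrently; lives outside the guarded struct

	lock          sync.Mutex // protects lockProtected
	lockProtected destLockProtected
}

func (d *dest) recordDiffID(blob, diffID string) {
	// All access to guarded state goes through one obvious door.
	d.lock.Lock()
	defer d.lock.Unlock()
	d.lockProtected.blobDiffIDs[blob] = diffID
}

func main() {
	d := &dest{lockProtected: destLockProtected{
		blobDiffIDs:      map[string]string{},
		indexToTOCDigest: map[int]string{},
	}}
	d.recordDiffID("sha256:aaa", "sha256:bbb")
	fmt.Println(d.lockProtected.blobDiffIDs)
}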
@ -117,18 +136,23 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
HasThreadSafePutBlob: true,
}),

imageRef: imageRef,
directory: directory,
signatureses: make(map[digest.Digest][]byte),
uncompressedOrTocDigest: make(map[digest.Digest]digest.Digest),
blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
fileSizes: make(map[digest.Digest]int64),
filenames: make(map[digest.Digest]string),
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
indexToStorageID: make(map[int]*string),
indexToAddedLayerInfo: make(map[int]addedLayerInfo),
diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
imageRef: imageRef,
directory: directory,
signatureses: make(map[digest.Digest][]byte),
metadata: storageImageMetadata{
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
},
indexToStorageID: make(map[int]string),
lockProtected: storageImageDestinationLockProtected{
indexToAddedLayerInfo: make(map[int]addedLayerInfo),
blobDiffIDs: make(map[digest.Digest]digest.Digest),
indexToTOCDigest: make(map[int]digest.Digest),
diffOutputs: make(map[int]*graphdriver.DriverWithDifferOutput),
blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
filenames: make(map[digest.Digest]string),
fileSizes: make(map[digest.Digest]int64),
},
}
dest.Compat = impl.AddCompat(dest)
return dest, nil
@ -142,12 +166,13 @@ func (s *storageImageDestination) Reference() types.ImageReference {

// Close cleans up the temporary directory and additional layer store handlers.
func (s *storageImageDestination) Close() error {
for _, al := range s.blobAdditionalLayer {
// This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock.
for _, al := range s.lockProtected.blobAdditionalLayer {
al.Release()
}
for _, v := range s.diffOutputs {
for _, v := range s.lockProtected.diffOutputs {
if v.Target != "" {
_ = s.imageRef.transport.store.CleanupStagingDirectory(v.Target)
_ = s.imageRef.transport.store.CleanupStagedLayer(v)
}
}
return os.RemoveAll(s.directory)
@ -227,9 +252,9 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf

// Record information about the blob.
s.lock.Lock()
s.uncompressedOrTocDigest[blobDigest] = diffID.Digest()
s.fileSizes[blobDigest] = counter.Count
s.filenames[blobDigest] = filename
s.lockProtected.blobDiffIDs[blobDigest] = diffID.Digest()
s.lockProtected.fileSizes[blobDigest] = counter.Count
s.lockProtected.filenames[blobDigest] = filename
s.lock.Unlock()
// This is safe because we have just computed diffID, and blobDigest was either computed
// by us, or validated by the caller (usually copy.digestingReader).
@ -269,7 +294,7 @@ func (f *zstdFetcher) GetBlobAt(chunks []chunked.ImageSourceChunk) (chan io.Read
// It is available only if SupportsPutBlobPartial().
// Even if SupportsPutBlobPartial() returns true, the call can fail, in which case the caller
// should fall back to PutBlobWithOptions.
func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, cache blobinfocache.BlobInfoCache2) (private.UploadedBlob, error) {
func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
fetcher := zstdFetcher{
chunkAccessor: chunkAccessor,
ctx: ctx,
@ -286,13 +311,25 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
return private.UploadedBlob{}, err
}

if out.TOCDigest == "" && out.UncompressedDigest == "" {
return private.UploadedBlob{}, errors.New("internal error: ApplyDiffWithDiffer succeeded with neither TOCDigest nor UncompressedDigest set")
}

blobDigest := srcInfo.Digest

s.lock.Lock()
s.uncompressedOrTocDigest[blobDigest] = blobDigest
s.fileSizes[blobDigest] = 0
s.filenames[blobDigest] = ""
s.diffOutputs[blobDigest] = out
if out.UncompressedDigest != "" {
// The computation of UncompressedDigest means the whole layer has been consumed; while doing that, chunked.GetDiffer is
// responsible for ensuring blobDigest has been validated.
s.lockProtected.blobDiffIDs[blobDigest] = out.UncompressedDigest
} else {
// Don’t identify layers by TOC if UncompressedDigest is available.
// - Using UncompressedDigest allows image reuse with non-partially-pulled layers
// - If UncompressedDigest has been computed, that means the layer was read completely, and the TOC has been created from scratch.
// That TOC is quite unlikely to match with any other TOC value.
s.lockProtected.indexToTOCDigest[options.LayerIndex] = out.TOCDigest
}
s.lockProtected.diffOutputs[options.LayerIndex] = out
s.lock.Unlock()

return private.UploadedBlob{
@ -321,68 +358,79 @@ func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context,
})
}

// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.uncompressedOrTocDigest and other metadata.
// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (blobDigest, size or -1), filling s.blobDiffIDs and other metadata.
// The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
func (s *storageImageDestination) tryReusingBlobAsPending(blobDigest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
// lock the entire method as it executes fairly quickly
s.lock.Lock()
defer s.lock.Unlock()

if options.SrcRef != nil {
// Check if we have the layer in the underlying additional layer store.
aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(digest, options.SrcRef.String())
aLayer, err := s.imageRef.transport.store.LookupAdditionalLayer(blobDigest, options.SrcRef.String())
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, digest, err)
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, blobDigest, err)
} else if err == nil {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.uncompressedOrTocDigest[digest] = aLayer.UncompressedDigest()
s.blobAdditionalLayer[digest] = aLayer
s.lockProtected.blobDiffIDs[blobDigest] = aLayer.UncompressedDigest()
s.lockProtected.blobAdditionalLayer[blobDigest] = aLayer
return true, private.ReusedBlob{
Digest: digest,
Digest: blobDigest,
Size: aLayer.CompressedSize(),
}, nil
}
}

if digest == "" {
if blobDigest == "" {
return false, private.ReusedBlob{}, errors.New(`Can not check for a blob with unknown digest`)
}
if err := digest.Validate(); err != nil {
if err := blobDigest.Validate(); err != nil {
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
}
if options.TOCDigest != "" {
if err := options.TOCDigest.Validate(); err != nil {
return false, private.ReusedBlob{}, fmt.Errorf("Can not check for a blob with invalid digest: %w", err)
}
}

// Check if we have a wasn't-compressed layer in storage that's based on that blob.

// Check if we've already cached it in a file.
if size, ok := s.fileSizes[digest]; ok {
if size, ok := s.lockProtected.fileSizes[blobDigest]; ok {
// s.lockProtected.blobDiffIDs is set either by putBlobToPendingFile or in createNewLayer when creating the
// filenames/fileSizes entry.
return true, private.ReusedBlob{
Digest: digest,
Digest: blobDigest,
Size: size,
}, nil
}

// Check if we have a wasn't-compressed layer in storage that's based on that blob.
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(digest)
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobDigest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, digest, err)
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, blobDigest, err)
}
if len(layers) > 0 {
// Save this for completeness.
s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest
s.lockProtected.blobDiffIDs[blobDigest] = blobDigest
return true, private.ReusedBlob{
Digest: digest,
Digest: blobDigest,
Size: layers[0].UncompressedSize,
}, nil
}

// Check if we have a was-compressed layer in storage that's based on that blob.
layers, err = s.imageRef.transport.store.LayersByCompressedDigest(digest)
layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobDigest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, digest, err)
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q: %w`, blobDigest, err)
}
if len(layers) > 0 {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest
// LayersByCompressedDigest only finds layers which were created from a full layer blob, and extracting that
// always sets UncompressedDigest.
diffID := layers[0].UncompressedDigest
if diffID == "" {
return false, private.ReusedBlob{}, fmt.Errorf("internal error: compressed layer %q (for compressed digest %q) does not have an uncompressed digest", layers[0].ID, blobDigest.String())
}
s.lockProtected.blobDiffIDs[blobDigest] = diffID
return true, private.ReusedBlob{
Digest: digest,
Digest: blobDigest,
Size: layers[0].CompressedSize,
}, nil
}
@ -391,23 +439,23 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
// Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
// uncompressed layer, and that can happen only if options.CanSubstitute, or if the incoming manifest already specifies the size.
if options.CanSubstitute || size != -1 {
if uncompressedDigest := options.Cache.UncompressedDigest(digest); uncompressedDigest != "" && uncompressedDigest != digest {
if uncompressedDigest := options.Cache.UncompressedDigest(blobDigest); uncompressedDigest != "" && uncompressedDigest != blobDigest {
layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with digest %q: %w`, uncompressedDigest, err)
}
if len(layers) > 0 {
if size != -1 {
s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest
s.lockProtected.blobDiffIDs[blobDigest] = uncompressedDigest
return true, private.ReusedBlob{
Digest: digest,
Digest: blobDigest,
Size: size,
}, nil
}
if !options.CanSubstitute {
return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", digest)
return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", blobDigest)
}
s.uncompressedOrTocDigest[uncompressedDigest] = layers[0].UncompressedDigest
s.lockProtected.blobDiffIDs[uncompressedDigest] = uncompressedDigest
return true, private.ReusedBlob{
Digest: uncompressedDigest,
Size: layers[0].UncompressedSize,
@ -416,23 +464,30 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
}
}

tocDigest := digest
if options.TOCDigest != nil {
tocDigest = *options.TOCDigest
}
if options.TOCDigest != "" && options.LayerIndex != nil {
// Check if we have a chunked layer in storage with the same TOC digest.
layers, err := s.imageRef.transport.store.LayersByTOCDigest(options.TOCDigest)

// Check if we have a chunked layer in storage with the same TOC digest.
layers, err = s.imageRef.transport.store.LayersByTOCDigest(tocDigest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, tocDigest, err)
}
if len(layers) > 0 {
// Save this for completeness.
s.uncompressedOrTocDigest[digest] = layers[0].TOCDigest
return true, private.ReusedBlob{
Digest: layers[0].TOCDigest,
Size: layers[0].UncompressedSize,
}, nil
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, options.TOCDigest, err)
}
if len(layers) > 0 {
if size != -1 {
s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
return true, private.ReusedBlob{
Digest: blobDigest,
Size: size,
MatchedByTOCDigest: true,
}, nil
} else if options.CanSubstitute && layers[0].UncompressedDigest != "" {
s.lockProtected.indexToTOCDigest[*options.LayerIndex] = options.TOCDigest
return true, private.ReusedBlob{
Digest: layers[0].UncompressedDigest,
Size: layers[0].UncompressedSize,
MatchedByTOCDigest: true,
}, nil
}
}
}

// Nope, we don't have it.
@ -444,6 +499,8 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
// that since we don't have a recommendation, a random ID should be used if one needs
// to be allocated.
func (s *storageImageDestination) computeID(m manifest.Manifest) string {
// This is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock.

// Build the diffID list. We need the decompressed sums that we've been calculating to
// fill in the DiffIDs. It's expected (but not enforced by us) that the number of
// diffIDs corresponds to the number of non-EmptyLayer entries in the history.
@ -457,28 +514,59 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string {
continue
}
blobSum := m.FSLayers[i].BlobSum
diffID, ok := s.uncompressedOrTocDigest[blobSum]
diffID, ok := s.lockProtected.blobDiffIDs[blobSum]
if !ok {
// this can, in principle, legitimately happen when a layer is reused by TOC.
logrus.Infof("error looking up diffID for layer %q", blobSum.String())
return ""
}
diffIDs = append([]digest.Digest{diffID}, diffIDs...)
}
case *manifest.Schema2:
case *manifest.Schema2, *manifest.OCI1:
// We know the ID calculation doesn't actually use the diffIDs, so we don't need to populate
// the diffID list.
case *manifest.OCI1:
for _, l := range m.Layers {
diffIDs = append(diffIDs, l.Digest)
}
default:
return ""
}
id, err := m.ImageID(diffIDs)

// We want to use the same ID for “the same” images, but without risking unwanted sharing / malicious image corruption.
//
// Traditionally that means the same ~config digest, as computed by m.ImageID;
// but if we pull a layer by TOC, we verify the layer against neither the (compressed) blob digest in the manifest,
// nor against the config’s RootFS.DiffIDs. We don’t really want to do either, to allow partial layer pulls where we never see
// most of the data.
//
// So, if a layer is pulled by TOC (and we do validate against the TOC), the fact that we used the TOC, and the value of the TOC,
// must enter into the image ID computation.
// But for images where no TOC was used, continue to use IDs computed the traditional way, to maximize image reuse on upgrades,
// and to introduce the changed behavior only when partial pulls are used.
//
// Note that it’s not 100% guaranteed that an image pulled by TOC uses an OCI manifest; consider
// (skopeo copy --format v2s2 docker://…/zstd-chunked-image containers-storage:… ). So this is not happening only in the OCI case above.
ordinaryImageID, err := m.ImageID(diffIDs)
if err != nil {
return ""
}
return id
tocIDInput := ""
hasLayerPulledByTOC := false
for i := range m.LayerInfos() {
layerValue := "" // An empty string is not a valid digest, so this is unambiguous with the TOC case.
tocDigest, ok := s.lockProtected.indexToTOCDigest[i] // "" if not a TOC
if ok {
hasLayerPulledByTOC = true
layerValue = tocDigest.String()
}
tocIDInput += layerValue + "|" // "|" can not be present in a TOC digest, so this is an unambiguous separator.
}

if !hasLayerPulledByTOC {
return ordinaryImageID
}
// ordinaryImageID is a digest of a config, which is a JSON value.
// To avoid the risk of collisions, start the input with @ so that the input is not a valid JSON.
tocImageID := digest.FromString("@With TOC:" + tocIDInput).Hex()
logrus.Debugf("Ordinary storage image ID %s; a layer was looked up by TOC, so using image ID %s", ordinaryImageID, tocImageID)
return tocImageID
}

// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig
@ -491,7 +579,7 @@ func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, er
|
||||
return nil, fmt.Errorf("invalid digest supplied when reading blob: %w", err)
|
||||
}
|
||||
// Assume it's a file, since we're only calling this from a place that expects to read files.
|
||||
if filename, ok := s.filenames[info.Digest]; ok {
|
||||
if filename, ok := s.lockProtected.filenames[info.Digest]; ok {
|
||||
contents, err2 := os.ReadFile(filename)
|
||||
if err2 != nil {
|
||||
return nil, fmt.Errorf(`reading blob from file %q: %w`, filename, err2)
|
||||
@ -525,17 +613,17 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
|
||||
// caller is the "worker" routine committing layers. All other routines
|
||||
// can continue pulling and queuing in layers.
|
||||
s.lock.Lock()
|
||||
s.indexToAddedLayerInfo[index] = info
|
||||
s.lockProtected.indexToAddedLayerInfo[index] = info
|
||||
|
||||
// We're still waiting for at least one previous/parent layer to be
|
||||
// committed, so there's nothing to do.
|
||||
if index != s.currentIndex {
|
||||
if index != s.lockProtected.currentIndex {
|
||||
s.lock.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
for {
|
||||
info, ok := s.indexToAddedLayerInfo[index]
|
||||
info, ok := s.lockProtected.indexToAddedLayerInfo[index]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
@ -550,25 +638,30 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
|
||||
|
||||
// Set the index at the very end to make sure that only one routine
|
||||
// enters stage 2).
|
||||
s.currentIndex = index
|
||||
s.lockProtected.currentIndex = index
|
||||
s.lock.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
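The queue-or-commit logic above is a reusable concurrency pattern: many goroutines deliver items tagged with an index, and whichever goroutine delivers the next expected index drains the queue in order. A minimal sketch, assuming simplified types (the real code commits storage layers rather than printing):

package main

import (
	"fmt"
	"sync"
)

type committer struct {
	mu      sync.Mutex
	queued  map[int]string
	current int // next index to commit; guarded by mu
}

func (c *committer) queueOrCommit(index int, item string) {
	c.mu.Lock()
	c.queued[index] = item
	if index != c.current {
		c.mu.Unlock() // a predecessor is still missing; another goroutine will drain
		return
	}
	for {
		idx := c.current
		it, ok := c.queued[idx]
		if !ok {
			break
		}
		delete(c.queued, idx)
		c.mu.Unlock()
		fmt.Println("committing", idx, it) // the actual commit happens outside the lock
		c.mu.Lock()
		c.current = idx + 1
	}
	c.mu.Unlock()
}

func main() {
	c := &committer{queued: map[int]string{}}
	var wg sync.WaitGroup
	for _, i := range []int{2, 0, 1} { // deliberately out of order
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			c.queueOrCommit(i, fmt.Sprintf("layer-%d", i))
		}(i)
	}
	wg.Wait()
}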
-// getDiffIDOrTOCDigest returns the diffID for the specified digest or the digest for the TOC, if known.
-func (s *storageImageDestination) getDiffIDOrTOCDigest(uncompressedDigest digest.Digest) (digest.Digest, bool) {
+// singleLayerIDComponent returns a single layer’s input to computing a layer (chain) ID,
+// and an indication whether the input already has the shape of a layer ID.
+// It returns ("", false) if the layer is not found at all (which should never happen).
+func (s *storageImageDestination) singleLayerIDComponent(layerIndex int, blobDigest digest.Digest) (string, bool) {
	s.lock.Lock()
	defer s.lock.Unlock()

-	if d, found := s.diffOutputs[uncompressedDigest]; found {
-		return d.TOCDigest, found
+	if d, found := s.lockProtected.indexToTOCDigest[layerIndex]; found {
+		return "@TOC=" + d.Hex(), false // "@" is not a valid start of a digest.Digest, so this is unambiguous.
	}
-	d, found := s.uncompressedOrTocDigest[uncompressedDigest]
-	return d, found
+
+	if d, found := s.lockProtected.blobDiffIDs[blobDigest]; found {
+		return d.Hex(), true // This looks like chain IDs, and it uses the traditional value.
+	}
+	return "", false
}

// commitLayer commits the specified layer with the given index to the storage.
-// size can usually be -1; it can be provided if the layer is not known to be already present in uncompressedOrTocDigest.
+// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
//
// If the layer cannot be committed yet, the function returns (true, nil).
//
@@ -586,29 +679,38 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) (bool, error) {
	// Start with an empty string or the previous layer ID.  Note that
	// `s.indexToStorageID` can only be accessed by *one* goroutine at any
	// given time. Hence, we don't need to lock accesses.
-	var lastLayer string
-	if prev := s.indexToStorageID[index-1]; prev != nil {
-		lastLayer = *prev
+	var parentLayer string
+	if index != 0 {
+		prev, ok := s.indexToStorageID[index-1]
+		if !ok {
+			return false, fmt.Errorf("Internal error: commitLayer called with previous layer %d not committed yet", index-1)
+		}
+		parentLayer = prev
	}

	// Carry over the previous ID for empty non-base layers.
	if info.emptyLayer {
-		s.indexToStorageID[index] = &lastLayer
+		s.indexToStorageID[index] = parentLayer
		return false, nil
	}

	// Check if there's already a layer with the ID that we'd give to the result of applying
	// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
-	// The diffIDOrTOCDigest refers either to the DiffID or the digest of the TOC.
-	diffIDOrTOCDigest, haveDiffIDOrTOCDigest := s.getDiffIDOrTOCDigest(info.digest)
-	if !haveDiffIDOrTOCDigest {
-		// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
-		// or to even check if we had it.
-		// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
+	// The layerID refers either to the DiffID or the digest of the TOC.
+	layerIDComponent, layerIDComponentStandalone := s.singleLayerIDComponent(index, info.digest)
+	if layerIDComponent == "" {
+		// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob() / TryReusingBlob() / …
+		//
+		// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache: a caller
		// that relies on using a blob digest that has never been seen by the store had better call
		// TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
		// so far we are going to accommodate that (if we should be doing that at all).
-		logrus.Debugf("looking for diffID or TOC digest for blob %+v", info.digest)
+		//
+		// We are also ignoring lookups by TOC, and other non-trivial situations.
+		// Those can only happen using the c/image/internal/private API,
+		// so those internal callers should be fixed to follow the API instead of expanding this fallback.
+		logrus.Debugf("looking for diffID for blob=%+v", info.digest)

		// Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit.
		has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{
			Cache: none.NoCache,
@@ -618,115 +720,140 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) (bool, error) {
			return false, fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err)
		}
		if !has {
-			return false, fmt.Errorf("error determining uncompressed digest or TOC digest for blob %q", info.digest.String())
+			return false, fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String())
		}
-		diffIDOrTOCDigest, haveDiffIDOrTOCDigest = s.getDiffIDOrTOCDigest(info.digest)
-		if !haveDiffIDOrTOCDigest {
-			return false, fmt.Errorf("we have blob %q, but don't know its uncompressed or TOC digest", info.digest.String())
+
+		layerIDComponent, layerIDComponentStandalone = s.singleLayerIDComponent(index, info.digest)
+		if layerIDComponent == "" {
+			return false, fmt.Errorf("we have blob %q, but don't know its layer ID", info.digest.String())
		}
	}
-	id := diffIDOrTOCDigest.Hex()
-	if lastLayer != "" {
-		id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffIDOrTOCDigest.Hex())).Hex()
+
+	id := layerIDComponent
+	if !layerIDComponentStandalone || parentLayer != "" {
+		id = digest.Canonical.FromString(parentLayer + "+" + layerIDComponent).Hex()
	}
	if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
		// There's already a layer that should have the right contents, just reuse it.
-		lastLayer = layer.ID
-		s.indexToStorageID[index] = &lastLayer
+		s.indexToStorageID[index] = layer.ID
		return false, nil
	}

+	layer, err := s.createNewLayer(index, info.digest, parentLayer, id)
+	if err != nil {
+		return false, err
+	}
+	if layer == nil {
+		return true, nil
+	}
+	s.indexToStorageID[index] = layer.ID
+	return false, nil
+}
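The chain-ID derivation above is compact but subtle. Here is a hedged sketch (my own simplification, not the vendored code) of the rule: a standalone component (a DiffID hex) can name a base layer directly, while anything else, including the "@TOC=…"-shaped inputs, is hashed together with the parent ID so the two namespaces can never collide:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// chainID derives a layer ID from its parent ID and this layer's ID component.
// Only a standalone, DiffID-shaped component with no parent is used verbatim.
func chainID(parent, component string, standalone bool) string {
	if standalone && parent == "" {
		return component
	}
	return digest.Canonical.FromString(parent + "+" + component).Hex()
}

func main() {
	base := chainID("", "deadbeef", true) // "deadbeef" stands in for a DiffID hex
	fmt.Println(chainID(base, "@TOC=cafef00d", false))
}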
+// createNewLayer creates a new layer newLayerID for (index, layerDigest) on top of parentLayer (which may be "").
+// If the layer cannot be committed yet, the function returns (nil, nil).
+func (s *storageImageDestination) createNewLayer(index int, layerDigest digest.Digest, parentLayer, newLayerID string) (*storage.Layer, error) {
	s.lock.Lock()
-	diffOutput, ok := s.diffOutputs[info.digest]
+	diffOutput, ok := s.lockProtected.diffOutputs[index]
	s.lock.Unlock()
	if ok {
-		if s.manifest == nil {
-			logrus.Debugf("Skipping commit for TOC=%q, manifest not yet available", id)
-			return true, nil
+		var untrustedUncompressedDigest digest.Digest
+		if diffOutput.UncompressedDigest == "" {
+			d, err := s.untrustedLayerDiffID(index)
+			if err != nil {
+				return nil, err
+			}
+			if d == "" {
+				logrus.Debugf("Skipping commit for layer %q, manifest not yet available", newLayerID)
+				return nil, nil
+			}
+			untrustedUncompressedDigest = d
		}

-		man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
-		if err != nil {
-			return false, fmt.Errorf("parsing manifest: %w", err)
-		}
-
-		cb, err := s.getConfigBlob(man.ConfigInfo())
-		if err != nil {
-			return false, err
-		}
-
-		// retrieve the expected uncompressed digest from the config blob.
-		configOCI := &imgspecv1.Image{}
-		if err := json.Unmarshal(cb, configOCI); err != nil {
-			return false, err
-		}
-		if index >= len(configOCI.RootFS.DiffIDs) {
-			return false, fmt.Errorf("index %d out of range for configOCI.RootFS.DiffIDs", index)
-		}
-
-		layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
-		if err != nil {
-			return false, err
-		}
-
-		// let the storage layer know what was the original uncompressed layer.
		flags := make(map[string]interface{})
-		flags[expectedLayerDiffIDFlag] = configOCI.RootFS.DiffIDs[index]
-		logrus.Debugf("Setting uncompressed digest to %q for layer %q", configOCI.RootFS.DiffIDs[index], id)
-		options := &graphdriver.ApplyDiffWithDifferOpts{
-			Flags: flags,
+		if untrustedUncompressedDigest != "" {
+			flags[expectedLayerDiffIDFlag] = untrustedUncompressedDigest
+			logrus.Debugf("Setting uncompressed digest to %q for layer %q", untrustedUncompressedDigest, newLayerID)
		}

-		if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, options); err != nil {
-			_ = s.imageRef.transport.store.Delete(layer.ID)
-			return false, err
-		}
-
-		s.indexToStorageID[index] = &layer.ID
-		return false, nil
+		args := storage.ApplyStagedLayerOptions{
+			ID:          newLayerID,
+			ParentLayer: parentLayer,
+
+			DiffOutput: diffOutput,
+			DiffOptions: &graphdriver.ApplyDiffWithDifferOpts{
+				Flags: flags,
+			},
+		}
+		layer, err := s.imageRef.transport.store.ApplyStagedLayer(args)
+		if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
+			return nil, fmt.Errorf("failed to put layer using a partial pull: %w", err)
+		}
+		return layer, nil
	}
	s.lock.Lock()
-	al, ok := s.blobAdditionalLayer[info.digest]
+	al, ok := s.lockProtected.blobAdditionalLayer[layerDigest]
	s.lock.Unlock()
	if ok {
-		layer, err := al.PutAs(id, lastLayer, nil)
+		layer, err := al.PutAs(newLayerID, parentLayer, nil)
		if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
-			return false, fmt.Errorf("failed to put layer from digest and labels: %w", err)
+			return nil, fmt.Errorf("failed to put layer from digest and labels: %w", err)
		}
-		lastLayer = layer.ID
-		s.indexToStorageID[index] = &lastLayer
-		return false, nil
+		return layer, nil
	}
	// Check if we previously cached a file with that blob's contents.  If we didn't,
	// then we need to read the desired contents from a layer.
+	var trustedUncompressedDigest, trustedOriginalDigest digest.Digest // For storage.LayerOptions
	s.lock.Lock()
-	filename, ok := s.filenames[info.digest]
+	tocDigest := s.lockProtected.indexToTOCDigest[index]       // "" if not set
+	optionalDiffID := s.lockProtected.blobDiffIDs[layerDigest] // "" if not set
+	filename, gotFilename := s.lockProtected.filenames[layerDigest]
	s.lock.Unlock()
-	if !ok {
-		// Try to find the layer with contents matching that blobsum.
-		layer := ""
-		layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffIDOrTOCDigest)
-		if err2 == nil && len(layers) > 0 {
-			layer = layers[0].ID
-		} else {
-			layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(info.digest)
-			if err2 == nil && len(layers) > 0 {
-				layer = layers[0].ID
-			}
-		}
-		if layer == "" {
-			return false, fmt.Errorf("locating layer for blob %q: %w", info.digest, err2)
-		}
+	if gotFilename && tocDigest == "" {
+		// If tocDigest != "" and we now happen to find a layerDigest match, the newLayerID has already been computed as TOC-based,
+		// and we don't know the relationship of the layerDigest and TOC digest.
+		// We could recompute newLayerID to be DiffID-based and use the file, but such a within-image layer
+		// reuse is expected to be pretty rare; instead, ignore the unexpected file match and proceed to the
+		// originally-planned TOC match.
+
+		// Because tocDigest == "", optionalDiffID must have been set; and even if it weren’t, PutLayer will recompute the digest from the stream.
+		trustedUncompressedDigest = optionalDiffID
+		trustedOriginalDigest = layerDigest // The code setting .filenames[layerDigest] is responsible for the contents matching.
+	} else {
+		// Try to find the layer with contents matching the data we use.
+		var layer *storage.Layer // = nil
+		if tocDigest != "" {
+			layers, err2 := s.imageRef.transport.store.LayersByTOCDigest(tocDigest)
+			if err2 == nil && len(layers) > 0 {
+				layer = &layers[0]
+			} else {
+				return nil, fmt.Errorf("locating layer for TOC digest %q: %w", tocDigest, err2)
+			}
+		} else {
+			// Because tocDigest == "", optionalDiffID must have been set
+			layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(optionalDiffID)
+			if err2 == nil && len(layers) > 0 {
+				layer = &layers[0]
+			} else {
+				layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(layerDigest)
+				if err2 == nil && len(layers) > 0 {
+					layer = &layers[0]
+				}
+			}
+			if layer == nil {
+				return nil, fmt.Errorf("locating layer for blob %q: %w", layerDigest, err2)
+			}
+		}
		// Read the layer's contents.
		noCompression := archive.Uncompressed
		diffOptions := &storage.DiffOptions{
			Compression: &noCompression,
		}
-		diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
+		diff, err2 := s.imageRef.transport.store.Diff("", layer.ID, diffOptions)
		if err2 != nil {
-			return false, fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2)
+			return nil, fmt.Errorf("reading layer %q for blob %q: %w", layer.ID, layerDigest, err2)
		}
		// Copy the layer diff to a file.  Diff() takes a lock that it holds
		// until the ReadCloser that it returns is closed, and PutLayer() wants
@@ -736,41 +863,112 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) (bool, error) {
		file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0o600)
		if err != nil {
			diff.Close()
-			return false, fmt.Errorf("creating temporary file %q: %w", filename, err)
+			return nil, fmt.Errorf("creating temporary file %q: %w", filename, err)
		}
		// Copy the data to the file.
		// TODO: This can take quite some time, and should ideally be cancellable using
		// ctx.Done().
-		_, err = io.Copy(file, diff)
+		fileSize, err := io.Copy(file, diff)
		diff.Close()
		file.Close()
		if err != nil {
-			return false, fmt.Errorf("storing blob to file %q: %w", filename, err)
+			return nil, fmt.Errorf("storing blob to file %q: %w", filename, err)
		}

+		if optionalDiffID == "" && layer.UncompressedDigest != "" {
+			optionalDiffID = layer.UncompressedDigest
+		}
+		// The stream we have is uncompressed; this matches the contents of the stream.
+		// If tocDigest != "", trustedUncompressedDigest might still be ""; in that case PutLayer will compute the value from the stream.
+		trustedUncompressedDigest = optionalDiffID
+		// FIXME? trustedOriginalDigest could be set to layerDigest IF tocDigest == "" (otherwise layerDigest is untrusted).
+		// But for c/storage to reasonably use it (as a CompressedDigest value), we should also ensure the CompressedSize of the created
+		// layer is correct, and the API does not currently make it possible (.CompressedSize is set from the input stream).
+		//
+		// We can legitimately set storage.LayerOptions.OriginalDigest to "",
+		// but that would just result in PutLayer computing the digest of the input stream == optionalDiffID.
+		// So, instead, set .OriginalDigest to the value we know already, to avoid that digest computation.
+		trustedOriginalDigest = optionalDiffID
+
+		// Allow using the already-collected layer contents without extracting the layer again.
+		//
+		// This only matches against the uncompressed digest.
+		// We don’t have the original compressed data here to trivially set filenames[layerDigest].
+		// In particular we can’t achieve the correct Layer.CompressedSize value with the current c/storage API.
+		// Within-image layer reuse is probably very rare, for now we prefer to avoid that complexity.
+		if trustedUncompressedDigest != "" {
+			s.lock.Lock()
+			s.lockProtected.blobDiffIDs[trustedUncompressedDigest] = trustedUncompressedDigest
+			s.lockProtected.filenames[trustedUncompressedDigest] = filename
+			s.lockProtected.fileSizes[trustedUncompressedDigest] = fileSize
+			s.lock.Unlock()
+		}
-		// Make sure that we can find this file later, should we need the layer's
-		// contents again.
-		s.lock.Lock()
-		s.filenames[info.digest] = filename
-		s.lock.Unlock()
	}
	// Read the cached blob and use it as a diff.
	file, err := os.Open(filename)
	if err != nil {
-		return false, fmt.Errorf("opening file %q: %w", filename, err)
+		return nil, fmt.Errorf("opening file %q: %w", filename, err)
	}
	defer file.Close()
	// Build the new layer using the diff, regardless of where it came from.
	// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
-	layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{
-		OriginalDigest:     info.digest,
-		UncompressedDigest: diffIDOrTOCDigest,
+	layer, _, err := s.imageRef.transport.store.PutLayer(newLayerID, parentLayer, nil, "", false, &storage.LayerOptions{
+		OriginalDigest:     trustedOriginalDigest,
+		UncompressedDigest: trustedUncompressedDigest,
	}, file)
	if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
-		return false, fmt.Errorf("adding layer with blob %q: %w", info.digest, err)
+		return nil, fmt.Errorf("adding layer with blob %q: %w", layerDigest, err)
	}
	return layer, nil
}
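The comments above note that when UncompressedDigest is left empty, PutLayer computes the digest from the stream itself. A small self-contained illustration of that one-pass idea (my own sketch, not the vendored code): tee the stream through a digester while copying, so a single read yields both the file and its uncompressed digest:

package main

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/opencontainers/go-digest"
)

// copyAndDigest copies src to dst and returns the byte count and the
// canonical (sha256) digest of everything that was copied.
func copyAndDigest(dst *os.File, src io.Reader) (int64, digest.Digest, error) {
	digester := digest.Canonical.Digester()
	n, err := io.Copy(dst, io.TeeReader(src, digester.Hash()))
	if err != nil {
		return n, "", err
	}
	return n, digester.Digest(), nil
}

func main() {
	f, err := os.CreateTemp("", "layer-*.tar")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()
	n, d, err := copyAndDigest(f, strings.NewReader("uncompressed layer bytes"))
	fmt.Println(n, d, err)
}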
+// untrustedLayerDiffID returns a DiffID value for layerIndex from the image’s config.
+// If the value is not yet available (but it can be available after s.manifest is set), it returns ("", nil).
+// WARNING: We don’t validate the DiffID value against the layer contents; it must not be used for any deduplication.
+func (s *storageImageDestination) untrustedLayerDiffID(layerIndex int) (digest.Digest, error) {
+	// At this point, we are either inside the multi-threaded scope of HasThreadSafePutBlob, and
+	// nothing is writing to s.manifest yet, or PutManifest has been called and s.manifest != nil.
+	// Either way this function does not need the protection of s.lock.
+	if s.manifest == nil {
+		logrus.Debugf("Skipping commit for layer %d, manifest not yet available", layerIndex)
+		return "", nil
+	}

-	s.indexToStorageID[index] = &layer.ID
-	return false, nil
+	if s.untrustedDiffIDValues == nil {
+		mt := manifest.GuessMIMEType(s.manifest)
+		if mt != imgspecv1.MediaTypeImageManifest {
+			// We could, in principle, build an ImageSource, support arbitrary image formats using image.FromUnparsedImage,
+			// and then use types.Image.OCIConfig so that we can parse the image.
+			//
+			// In practice, this should, right now, only matter for pulls of OCI images (this code path implies that a layer has an annotation),
+			// while converting to a non-OCI format, using a manual (skopeo copy) or something similar, not (podman pull).
+			// So it is not implemented yet.
+			return "", fmt.Errorf("determining DiffID for manifest type %q is not yet supported", mt)
+		}
+		man, err := manifest.FromBlob(s.manifest, mt)
+		if err != nil {
+			return "", fmt.Errorf("parsing manifest: %w", err)
+		}
+
+		cb, err := s.getConfigBlob(man.ConfigInfo())
+		if err != nil {
+			return "", err
+		}
+
+		// retrieve the expected uncompressed digest from the config blob.
+		configOCI := &imgspecv1.Image{}
+		if err := json.Unmarshal(cb, configOCI); err != nil {
+			return "", err
+		}
+		s.untrustedDiffIDValues = slices.Clone(configOCI.RootFS.DiffIDs)
+		if s.untrustedDiffIDValues == nil { // Unlikely but possible in theory…
+			s.untrustedDiffIDValues = []digest.Digest{}
+		}
+	}
+	if layerIndex >= len(s.untrustedDiffIDValues) {
+		return "", fmt.Errorf("image config has only %d DiffID values, but a layer with index %d exists", len(s.untrustedDiffIDValues), layerIndex)
+	}
+	return s.untrustedDiffIDValues[layerIndex], nil
+}
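The config parsing used above is simple enough to show standalone. A minimal sketch, assuming only the OCI image-spec and go-digest modules; the key point is that these DiffIDs come straight from the (untrusted) image config, so they identify layers but must never drive deduplication without verification:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/opencontainers/go-digest"
	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

// untrustedDiffIDs extracts RootFS.DiffIDs from an OCI image config blob.
func untrustedDiffIDs(configBlob []byte) ([]digest.Digest, error) {
	config := &imgspecv1.Image{}
	if err := json.Unmarshal(configBlob, config); err != nil {
		return nil, fmt.Errorf("parsing image config: %w", err)
	}
	return config.RootFS.DiffIDs, nil
}

func main() {
	// sha256 of the empty string, used here only as a syntactically valid sample value.
	blob := []byte(`{"rootfs":{"type":"layers","diff_ids":["sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"]}}`)
	ids, err := untrustedDiffIDs(blob)
	fmt.Println(ids, err)
}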
// Commit marks the process of storing the image as successful and asks for the image to be persisted.
@@ -781,6 +979,8 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) (bool, error) {
// - Uploaded data MAY be visible to others before Commit() is called
// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
+	// This function is outside of the scope of HasThreadSafePutBlob, so we don’t need to hold s.lock.
+
	if len(s.manifest) == 0 {
		return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()")
	}
@@ -827,12 +1027,12 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
		}
	}
	var lastLayer string
-	if len(layerBlobs) > 0 { // Can happen when using caches
-		prev := s.indexToStorageID[len(layerBlobs)-1]
-		if prev == nil {
+	if len(layerBlobs) > 0 { // Zero-layer images rarely make sense, but it is technically possible, and may happen for non-image artifacts.
+		prev, ok := s.indexToStorageID[len(layerBlobs)-1]
+		if !ok {
			return fmt.Errorf("Internal error: storageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1)
		}
-		lastLayer = *prev
+		lastLayer = prev
	}

	// If one of those blobs was a configuration blob, then we can try to dig out the date when the image
@@ -846,14 +1046,14 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
	// Set up to save the non-layer blobs as data items.  Since we only share layers, they should all be in files, so
	// we just need to screen out the ones that are actually layers to get the list of non-layers.
	dataBlobs := set.New[digest.Digest]()
-	for blob := range s.filenames {
+	for blob := range s.lockProtected.filenames {
		dataBlobs.Add(blob)
	}
	for _, layerBlob := range layerBlobs {
		dataBlobs.Delete(layerBlob.Digest)
	}
	for _, blob := range dataBlobs.Values() {
-		v, err := os.ReadFile(s.filenames[blob])
+		v, err := os.ReadFile(s.lockProtected.filenames[blob])
		if err != nil {
			return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err)
		}
@@ -906,7 +1106,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error {
	}

	// Set up to save our metadata.
-	metadata, err := json.Marshal(s)
+	metadata, err := json.Marshal(s.metadata)
	if err != nil {
		return fmt.Errorf("encoding metadata for image: %w", err)
	}
@@ -1011,7 +1211,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
	}
	if instanceDigest == nil {
		s.signatures = sigblob
-		s.SignatureSizes = sizes
+		s.metadata.SignatureSizes = sizes
		if len(s.manifest) > 0 {
			manifestDigest := s.manifestDigest
			instanceDigest = &manifestDigest
@@ -1019,7 +1219,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
		}
	}
	if instanceDigest != nil {
		s.signatureses[*instanceDigest] = sigblob
-		s.SignaturesSizes[*instanceDigest] = sizes
+		s.metadata.SignaturesSizes[*instanceDigest] = sizes
	}
	return nil
}
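The json.Marshal change above is worth spelling out: marshaling a dedicated metadata struct instead of the whole destination object keeps the JSON stored in c/storage limited to intended fields. A hedged sketch with illustrative types:

package main

import (
	"encoding/json"
	"fmt"
)

// imageMetadata mirrors the idea of storageImageMetadata below: only these
// tagged fields ever reach storage.Image.Metadata.
type imageMetadata struct {
	SignatureSizes  []int            `json:"signature-sizes,omitempty"`
	SignaturesSizes map[string][]int `json:"signatures-sizes,omitempty"`
}

func main() {
	m := imageMetadata{SignatureSizes: []int{1234}}
	b, err := json.Marshal(m) // marshal the metadata struct, not the enclosing object
	fmt.Println(string(b), err)
}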
16  vendor/github.com/containers/image/v5/storage/storage_image.go  generated vendored
@@ -18,11 +18,6 @@ var (
	ErrNoSuchImage = storage.ErrNotAnImage
)

-type storageImageCloser struct {
-	types.ImageCloser
-	size int64
-}
-
// manifestBigDataKey returns a key suitable for recording a manifest with the specified digest using storage.Store.ImageBigData and related functions.
// If a specific manifest digest is explicitly requested by the user, the key returned by this function should be used preferably;
// for compatibility, if a manifest is not available under this key, check also storage.ImageDigestBigDataKey
@@ -36,6 +31,17 @@ func signatureBigDataKey(digest digest.Digest) string {
	return "signature-" + digest.Encoded()
}

+// storageImageMetadata is stored, as JSON, in storage.Image.Metadata
+type storageImageMetadata struct {
+	SignatureSizes  []int                   `json:"signature-sizes,omitempty"`  // List of sizes of each signature slice
+	SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // Sizes of each manifest's signature slice
+}
+
+type storageImageCloser struct {
+	types.ImageCloser
+	size int64
+}
+
// Size() returns the previously-computed size of the image, with no error.
func (s *storageImageCloser) Size() (int64, error) {
	return s.size, nil
81  vendor/github.com/containers/image/v5/storage/storage_src.go  generated vendored
@@ -29,16 +29,6 @@ import (
	"github.com/sirupsen/logrus"
)

-// getBlobMutexProtected is a struct to hold the state of the getBlobMutex mutex.
-type getBlobMutexProtected struct {
-	// digestToLayerID is a lookup map from the layer digest (either the uncompressed digest or the TOC digest) to the
-	// layer ID in the store.
-	digestToLayerID map[digest.Digest]string
-
-	// layerPosition stores where we are in reading a blob's layers
-	layerPosition map[digest.Digest]int
-}
-
type storageImageSource struct {
	impl.Compat
	impl.PropertyMethodsInitialize
@@ -47,13 +37,25 @@ type storageImageSource struct {
	imageRef      storageReference
	image         *storage.Image
	systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
-	cachedManifest []byte     // A cached copy of the manifest, if already known, or nil
-	getBlobMutex   sync.Mutex // Mutex to sync state for parallel GetBlob executions (it guards layerPosition and digestToLayerID)
+	metadata       storageImageMetadata
+	cachedManifest []byte     // A cached copy of the manifest, if already known, or nil
+	getBlobMutex   sync.Mutex // Mutex to sync state for parallel GetBlob executions
	getBlobMutexProtected getBlobMutexProtected
-	SignatureSizes  []int                   `json:"signature-sizes,omitempty"`  // List of sizes of each signature slice
-	SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice
}

+// getBlobMutexProtected contains storageImageSource data protected by getBlobMutex.
+type getBlobMutexProtected struct {
+	// digestToLayerID is a lookup map from a possibly-untrusted uncompressed layer digest (as returned by LayerInfosForCopy) to the
+	// layer ID in the store.
+	digestToLayerID map[digest.Digest]string
+
+	// layerPosition stores where we are in reading a blob's layers
+	layerPosition map[digest.Digest]int
+}
+
+// expectedLayerDiffIDFlag is a per-layer flag containing an UNTRUSTED uncompressed digest of the layer.
+// It is set when pulling a layer by TOC; later, this value is used with digestToLayerID
+// to allow identifying the layer — and the consumer is expected to verify the blob returned by GetBlob against the digest.
+const expectedLayerDiffIDFlag = "expected-layer-diffid"
+
// newImageSource sets up an image for reading.
@@ -71,11 +73,13 @@ func newImageSource(sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
		}),
		NoGetBlobAtInitialize: stubs.NoGetBlobAt(imageRef),

-		imageRef:        imageRef,
-		systemContext:   sys,
-		image:           img,
-		SignatureSizes:  []int{},
-		SignaturesSizes: make(map[digest.Digest][]int),
+		imageRef:      imageRef,
+		systemContext: sys,
+		image:         img,
+		metadata: storageImageMetadata{
+			SignatureSizes:  []int{},
+			SignaturesSizes: make(map[digest.Digest][]int),
+		},
		getBlobMutexProtected: getBlobMutexProtected{
			digestToLayerID: make(map[digest.Digest]string),
			layerPosition:   make(map[digest.Digest]int),
@@ -83,7 +87,7 @@ func newImageSource(sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
	}
	image.Compat = impl.AddCompat(image)
	if img.Metadata != "" {
-		if err := json.Unmarshal([]byte(img.Metadata), image); err != nil {
+		if err := json.Unmarshal([]byte(img.Metadata), &image.metadata); err != nil {
			return nil, fmt.Errorf("decoding metadata for source image: %w", err)
		}
	}
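The getBlobMutexProtected refactor above applies a general Go locking convention: group everything a mutex guards into one named struct, so the locking contract is visible in the type rather than in scattered comments. A minimal sketch with hypothetical names:

package main

import (
	"fmt"
	"sync"
)

type blobState struct {
	mu sync.Mutex
	// protected holds all fields that must only be touched with mu held.
	protected struct {
		digestToLayerID map[string]string
		layerPosition   map[string]int
	}
}

func newBlobState() *blobState {
	b := &blobState{}
	b.protected.digestToLayerID = make(map[string]string)
	b.protected.layerPosition = make(map[string]int)
	return b
}

func (b *blobState) recordLayer(digest, layerID string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.protected.digestToLayerID[digest] = layerID
}

func main() {
	b := newBlobState()
	b.recordLayer("sha256:deadbeef", "layer-1") // sample values
	fmt.Println(b.protected.digestToLayerID)
}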
@@ -118,8 +122,9 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {

	var layers []storage.Layer

-	// If the digest was overridden by LayerInfosForCopy, then we need to use the TOC digest
-	// to retrieve it from the storage.
+	// This lookup path is strictly necessary for layers identified by TOC digest
+	// (where LayersByUncompressedDigest might not find our layer);
+	// for other layers it is an optimization to avoid the cost of the LayersByUncompressedDigest call.
	s.getBlobMutex.Lock()
	layerID, found := s.getBlobMutexProtected.digestToLayerID[digest]
	s.getBlobMutex.Unlock()
@@ -297,29 +302,29 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
		if err != nil {
			return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err)
		}
-		if layer.UncompressedDigest == "" && layer.TOCDigest == "" {
-			return nil, fmt.Errorf("uncompressed digest and TOC digest for layer %q is unknown", layerID)
-		}
		if layer.UncompressedSize < 0 {
			return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID)
		}

		blobDigest := layer.UncompressedDigest
-
-		if layer.TOCDigest != "" {
+		if blobDigest == "" {
+			if layer.TOCDigest == "" {
+				return nil, fmt.Errorf("uncompressed digest and TOC digest for layer %q is unknown", layerID)
+			}
			if layer.Flags == nil || layer.Flags[expectedLayerDiffIDFlag] == nil {
				return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not set", layer.TOCDigest, layerID, expectedLayerDiffIDFlag)
			}
-			if expectedDigest, ok := layer.Flags[expectedLayerDiffIDFlag].(string); ok {
-				// if the layer is stored by its TOC, report the expected diffID as the layer Digest
-				// but store the TOC digest so we can later retrieve it from the storage.
-				blobDigest, err = digest.Parse(expectedDigest)
-				if err != nil {
-					return nil, fmt.Errorf("parsing expected diffID %q for layer %q: %w", expectedDigest, layerID, err)
-				}
-			} else {
+			expectedDigest, ok := layer.Flags[expectedLayerDiffIDFlag].(string)
+			if !ok {
				return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not a string", layer.TOCDigest, layerID, expectedLayerDiffIDFlag)
			}
+			// If the layer is stored by its TOC, report the expected diffID as the layer Digest;
+			// the generic code is responsible for validating the digest.
+			// We can locate the layer without further c/storage help using s.getBlobMutexProtected.digestToLayerID.
+			blobDigest, err = digest.Parse(expectedDigest)
+			if err != nil {
+				return nil, fmt.Errorf("parsing expected diffID %q for layer %q: %w", expectedDigest, layerID, err)
+			}
		}
		s.getBlobMutex.Lock()
		s.getBlobMutexProtected.digestToLayerID[blobDigest] = layer.ID
@@ -375,11 +380,11 @@ func buildLayerInfosForCopy(manifestInfos []manifest.LayerInfo, physicalInfos []types.BlobInfo) ([]types.BlobInfo, error) {
func (s *storageImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
	var offset int
	signatureBlobs := []byte{}
-	signatureSizes := s.SignatureSizes
+	signatureSizes := s.metadata.SignatureSizes
	key := "signatures"
	instance := "default instance"
	if instanceDigest != nil {
-		signatureSizes = s.SignaturesSizes[*instanceDigest]
+		signatureSizes = s.metadata.SignaturesSizes[*instanceDigest]
		key = signatureBigDataKey(*instanceDigest)
		instance = instanceDigest.Encoded()
	}
@@ -425,7 +430,7 @@ func (s *storageImageSource) getSize() (int64, error) {
		sum += bigSize
	}
	// Add the signature sizes.
-	for _, sigSize := range s.SignatureSizes {
+	for _, sigSize := range s.metadata.SignatureSizes {
		sum += int64(sigSize)
	}
	// Walk the layer list.
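The flag handling in LayerInfosForCopy above is a standard decoded-JSON pattern: layer flags arrive as map[string]interface{}, so values must be nil-checked and type-asserted before use. A minimal sketch (the flag key matches the constant defined earlier; everything else is illustrative):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// expectedDiffID extracts and validates the untrusted per-layer DiffID flag.
func expectedDiffID(flags map[string]interface{}) (digest.Digest, error) {
	v, ok := flags["expected-layer-diffid"]
	if !ok || v == nil {
		return "", fmt.Errorf("flag is not set")
	}
	s, ok := v.(string) // JSON strings decode to string; anything else is a corrupt flag
	if !ok {
		return "", fmt.Errorf("flag is not a string")
	}
	return digest.Parse(s) // rejects malformed digest syntax
}

func main() {
	d, err := expectedDiffID(map[string]interface{}{
		"expected-layer-diffid": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
	})
	fmt.Println(d, err)
}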
2  vendor/github.com/containers/storage/.cirrus.yml  generated vendored
@@ -23,7 +23,7 @@ env:
    # GCE project where images live
    IMAGE_PROJECT: "libpod-218412"
    # VM Image built in containers/automation_images
-    IMAGE_SUFFIX: "c20231208t193858z-f39f38d13"
+    IMAGE_SUFFIX: "c20240102t155643z-f39f38d13"
    FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
    DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
2  vendor/github.com/containers/storage/Makefile  generated vendored
@@ -41,7 +41,7 @@ containers-storage: ## build using gc on the host
	$(GO) build -compiler gc $(BUILDFLAGS) ./cmd/containers-storage

codespell:
-	codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L worl,flate,uint,iff,od,ERRO -w
+	codespell -S Makefile,build,buildah,buildah.spec,imgtype,copy,AUTHORS,bin,vendor,.git,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L plack,worl,flate,uint,iff,od,ERRO -w

binary local-binary: containers-storage
2  vendor/github.com/containers/storage/VERSION  generated vendored
@@ -1 +1 @@
-1.52.0
+1.52.1-dev
22  vendor/github.com/containers/storage/drivers/driver.go  generated vendored
@@ -196,6 +196,8 @@ type DriverWithDifferOutput struct {
	BigData   map[string][]byte
	TarSplit  []byte
	TOCDigest digest.Digest
+	// RootDirMode is the mode of the root directory of the layer, if specified.
+	RootDirMode *os.FileMode
	// Artifacts is a collection of additional artifacts
	// generated by the differ that the storage driver can use.
	Artifacts map[string]interface{}
@@ -212,10 +214,26 @@ const (
	DifferOutputFormatFlat
)

+type DifferFsVerity int
+
+const (
+	// DifferFsVerityDisabled means no fs-verity is used
+	DifferFsVerityDisabled = iota
+
+	// DifferFsVerityEnabled means fs-verity is used when supported
+	DifferFsVerityEnabled
+
+	// DifferFsVerityRequired means fs-verity is required
+	DifferFsVerityRequired
+)
+
// DifferOptions overrides how the differ works
type DifferOptions struct {
	// Format defines the destination directory layout format
	Format DifferOutputFormat
+
+	// UseFsVerity defines whether fs-verity is used
+	UseFsVerity DifferFsVerity
}

// Differ defines the interface for using a custom differ.
@@ -231,8 +249,8 @@ type DriverWithDiffer interface {
	// ApplyDiffWithDiffer applies the changes using the callback function.
	// If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory.
	ApplyDiffWithDiffer(id, parent string, options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error)
-	// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
-	ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error
+	// ApplyDiffFromStagingDirectory applies the changes using the diffOutput target directory.
+	ApplyDiffFromStagingDirectory(id, parent string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error
	// CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors
	CleanupStagingDirectory(stagingDirectory string) error
	// DifferTarget gets the location where files are stored for the layer.
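To show how the new DifferFsVerity knob composes with DifferOptions, here is a hedged usage sketch. It assumes the vendored package path and exported names exactly as they appear in the diff above (github.com/containers/storage/drivers, DifferOptions, DifferOutputFormatFlat, DifferFsVerityEnabled); it is not taken from skopeo itself:

package main

import (
	"fmt"

	graphdriver "github.com/containers/storage/drivers"
)

func main() {
	opts := graphdriver.DifferOptions{
		Format:      graphdriver.DifferOutputFormatFlat, // composefs expects the flat layout
		UseFsVerity: graphdriver.DifferFsVerityEnabled,  // best-effort fs-verity, per the enum comments
	}
	fmt.Printf("%+v\n", opts)
}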
72  vendor/github.com/containers/storage/drivers/overlay/composefs.go  generated vendored
@@ -7,15 +7,13 @@ import (
	"encoding/binary"
	"errors"
	"fmt"
-	"io/fs"
	"os"
	"os/exec"
	"path/filepath"
	"sync"
-	"syscall"
-	"unsafe"

	"github.com/containers/storage/pkg/chunked/dump"
+	"github.com/containers/storage/pkg/fsverity"
	"github.com/containers/storage/pkg/loopback"
	"github.com/sirupsen/logrus"
	"golang.org/x/sys/unix"
@@ -34,72 +32,6 @@ func getComposeFsHelper() (string, error) {
	return composeFsHelperPath, composeFsHelperErr
}

-func enableVerity(description string, fd int) error {
-	enableArg := unix.FsverityEnableArg{
-		Version:        1,
-		Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256,
-		Block_size:     4096,
-	}
-
-	_, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg)))
-	if e1 != 0 && !errors.Is(e1, unix.EEXIST) {
-		return fmt.Errorf("failed to enable verity for %q: %w", description, e1)
-	}
-	return nil
-}
-
-type verityDigest struct {
-	Fsv unix.FsverityDigest
-	Buf [64]byte
-}
-
-func measureVerity(description string, fd int) (string, error) {
-	var digest verityDigest
-	digest.Fsv.Size = 64
-	_, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest)))
-	if e1 != 0 {
-		return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1)
-	}
-	return fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil
-}
-
-func enableVerityRecursive(root string) (map[string]string, error) {
-	digests := make(map[string]string)
-	walkFn := func(path string, d fs.DirEntry, err error) error {
-		if err != nil {
-			return err
-		}
-		if !d.Type().IsRegular() {
-			return nil
-		}
-
-		f, err := os.Open(path)
-		if err != nil {
-			return err
-		}
-		defer f.Close()
-
-		if err := enableVerity(path, int(f.Fd())); err != nil {
-			return err
-		}
-
-		verity, err := measureVerity(path, int(f.Fd()))
-		if err != nil {
-			return err
-		}
-
-		relPath, err := filepath.Rel(root, path)
-		if err != nil {
-			return err
-		}
-
-		digests[relPath] = verity
-		return nil
-	}
-	err := filepath.WalkDir(root, walkFn)
-	return digests, err
-}
-
func getComposefsBlob(dataDir string) string {
	return filepath.Join(dataDir, "composefs.blob")
}
@@ -151,7 +83,7 @@ func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error {
		return err
	}

-	if err := enableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
+	if err := fsverity.EnableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
		logrus.Warningf("%s", err)
	}
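After this refactor, the raw fs-verity ioctl wrappers live in pkg/fsverity and the overlay driver only consumes the helper. A hedged caller-side sketch; the EnableVerity signature is inferred from the single call site above, and the file path is hypothetical:

package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/containers/storage/pkg/fsverity"
	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/tmp/composefs.blob") // hypothetical manifest file
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	// Tolerate filesystems without fs-verity support, as the driver does.
	if err := fsverity.EnableVerity("manifest file", int(f.Fd())); err != nil &&
		!errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
		fmt.Println("enabling fs-verity:", err)
	}
}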
253  vendor/github.com/containers/storage/drivers/overlay/overlay.go  generated vendored
@@ -82,7 +82,8 @@ const (
	lowerFile = "lower"
	maxDepth  = 500

-	tocArtifact = "toc"
+	tocArtifact             = "toc"
+	fsVerityDigestsArtifact = "fs-verity-digests"

	// idLength represents the number of random characters
	// which can be used to create the unique link identifier
@@ -295,7 +296,7 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool {
	// a bunch of network file systems...
	case graphdriver.FsMagicNfsFs, graphdriver.FsMagicSmbFs, graphdriver.FsMagicAcfs,
		graphdriver.FsMagicAfs, graphdriver.FsMagicCephFs, graphdriver.FsMagicCIFS,
-		graphdriver.FsMagicFHGFSFs, graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX,
+		graphdriver.FsMagicGPFS, graphdriver.FsMagicIBRIX,
		graphdriver.FsMagicKAFS, graphdriver.FsMagicLUSTRE, graphdriver.FsMagicNCP,
		graphdriver.FsMagicNFSD, graphdriver.FsMagicOCFS2, graphdriver.FsMagicPANFS,
		graphdriver.FsMagicPRLFS, graphdriver.FsMagicSMB2, graphdriver.FsMagicSNFS,
@@ -309,16 +310,6 @@ func isNetworkFileSystem(fsMagic graphdriver.FsMagic) bool {
// If overlay filesystem is not supported on the host, a wrapped graphdriver.ErrNotSupported is returned as error.
// If an overlay filesystem is not supported over an existing filesystem then a wrapped graphdriver.ErrIncompatibleFS is returned.
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
-	// If custom --imagestore is selected never
-	// ditch the original graphRoot, instead add it as
-	// additionalImageStore so its images can still be
-	// read and used.
-	if options.ImageStore != "" {
-		graphRootAsAdditionalStore := fmt.Sprintf("AdditionalImageStore=%s", options.ImageStore)
-		options.DriverOptions = append(options.DriverOptions, graphRootAsAdditionalStore)
-		// complete base name with driver name included
-		options.ImageStore = filepath.Join(options.ImageStore, "overlay")
-	}
	opts, err := parseOptions(options.DriverOptions)
	if err != nil {
		return nil, err
@@ -862,22 +853,15 @@ func (d *Driver) Status() [][2]string {
// Metadata returns meta data about the overlay driver such as
// LowerDir, UpperDir, WorkDir and MergeDir used to store data.
func (d *Driver) Metadata(id string) (map[string]string, error) {
-	dir, imagestore, _ := d.dir2(id)
+	dir := d.dir(id)
	if _, err := os.Stat(dir); err != nil {
		return nil, err
	}
-	workDirBase := dir
-	if imagestore != "" {
-		if _, err := os.Stat(dir); err != nil {
-			return nil, err
-		}
-		workDirBase = imagestore
-	}

	metadata := map[string]string{
-		"WorkDir":   path.Join(workDirBase, "work"),
-		"MergedDir": path.Join(workDirBase, "merged"),
-		"UpperDir":  path.Join(workDirBase, "diff"),
+		"WorkDir":   path.Join(dir, "work"),
+		"MergedDir": path.Join(dir, "merged"),
+		"UpperDir":  path.Join(dir, "diff"),
	}

	lowerDirs, err := d.getLowerDirs(id)
@@ -895,7 +879,7 @@ func (d *Driver) Metadata(id string) (map[string]string, error) {
// is being shutdown. For now, we just have to unmount the bind mounted
// we had created.
func (d *Driver) Cleanup() error {
-	_ = os.RemoveAll(d.getStagingDir())
+	_ = os.RemoveAll(filepath.Join(d.home, stagingDir))
	return mount.Unmount(d.home)
}
@@ -991,8 +975,10 @@ func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) (retErr error) {
	return d.create(id, parent, opts, true)
}

-func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disableQuota bool) (retErr error) {
-	dir, imageStore, _ := d.dir2(id)
+func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) {
+	dir, homedir, _ := d.dir2(id, readOnly)
+
+	disableQuota := readOnly

	uidMaps := d.uidMaps
	gidMaps := d.gidMaps
@@ -1003,7 +989,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) {
	}

	// Make the link directory if it does not exist
-	if err := idtools.MkdirAllAs(path.Join(d.home, linkDir), 0o755, 0, 0); err != nil {
+	if err := idtools.MkdirAllAs(path.Join(homedir, linkDir), 0o755, 0, 0); err != nil {
		return err
	}

@@ -1020,20 +1006,8 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) {
	if err := idtools.MkdirAllAndChownNew(path.Dir(dir), 0o755, idPair); err != nil {
		return err
	}
-	workDirBase := dir
-	if imageStore != "" {
-		workDirBase = imageStore
-		if err := idtools.MkdirAllAndChownNew(path.Dir(imageStore), 0o755, idPair); err != nil {
-			return err
-		}
-	}
	if parent != "" {
-		parentBase, parentImageStore, inAdditionalStore := d.dir2(parent)
-		// If parentBase path is additional image store, select the image contained in parentBase.
-		// See https://github.com/containers/podman/issues/19748
-		if parentImageStore != "" && !inAdditionalStore {
-			parentBase = parentImageStore
-		}
+		parentBase := d.dir(parent)
		st, err := system.Stat(filepath.Join(parentBase, "diff"))
		if err != nil {
			return err
@@ -1054,11 +1028,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) {
	if err := idtools.MkdirAllAndChownNew(dir, 0o700, idPair); err != nil {
		return err
	}
-	if imageStore != "" {
-		if err := idtools.MkdirAllAndChownNew(imageStore, 0o700, idPair); err != nil {
-			return err
-		}
-	}

	defer func() {
		// Clean up on failure
@@ -1066,11 +1035,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) {
			if err2 := os.RemoveAll(dir); err2 != nil {
				logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", dir, err2)
			}
-			if imageStore != "" {
-				if err2 := os.RemoveAll(workDirBase); err2 != nil {
-					logrus.Errorf("While recovering from a failure creating a layer, error deleting %#v: %v", workDirBase, err2)
-				}
-			}
		}
	}()

@@ -1093,11 +1057,6 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) {
		if err := d.quotaCtl.SetQuota(dir, quota); err != nil {
			return err
		}
-		if imageStore != "" {
-			if err := d.quotaCtl.SetQuota(imageStore, quota); err != nil {
-				return err
-			}
-		}
	}

	perms := defaultPerms
@@ -1106,12 +1065,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) {
	}

	if parent != "" {
-		parentBase, parentImageStore, inAdditionalStore := d.dir2(parent)
-		// If parentBase path is additional image store, select the image contained in parentBase.
-		// See https://github.com/containers/podman/issues/19748
-		if parentImageStore != "" && !inAdditionalStore {
-			parentBase = parentImageStore
-		}
+		parentBase := d.dir(parent)
		st, err := system.Stat(filepath.Join(parentBase, "diff"))
		if err != nil {
			return err
@@ -1119,17 +1073,14 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) {
		perms = os.FileMode(st.Mode())
	}

-	if err := idtools.MkdirAs(path.Join(workDirBase, "diff"), perms, rootUID, rootGID); err != nil {
+	if err := idtools.MkdirAs(path.Join(dir, "diff"), perms, rootUID, rootGID); err != nil {
		return err
	}

	lid := generateID(idLength)

	linkBase := path.Join("..", id, "diff")
-	if imageStore != "" {
-		linkBase = path.Join(imageStore, "diff")
-	}
-	if err := os.Symlink(linkBase, path.Join(d.home, linkDir, lid)); err != nil {
+	if err := os.Symlink(linkBase, path.Join(homedir, linkDir, lid)); err != nil {
		return err
	}

@@ -1138,10 +1089,10 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, readOnly bool) (retErr error) {
		return err
	}

-	if err := idtools.MkdirAs(path.Join(workDirBase, "work"), 0o700, rootUID, rootGID); err != nil {
+	if err := idtools.MkdirAs(path.Join(dir, "work"), 0o700, rootUID, rootGID); err != nil {
		return err
	}
-	if err := idtools.MkdirAs(path.Join(workDirBase, "merged"), 0o700, rootUID, rootGID); err != nil {
+	if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, rootUID, rootGID); err != nil {
		return err
	}
@@ -1223,26 +1174,39 @@ func (d *Driver) getLower(parent string) (string, error) {
}

func (d *Driver) dir(id string) string {
-	p, _, _ := d.dir2(id)
+	p, _, _ := d.dir2(id, false)
	return p
}

-func (d *Driver) dir2(id string) (string, string, bool) {
-	newpath := path.Join(d.home, id)
-	imageStore := ""
+func (d *Driver) getAllImageStores() []string {
+	additionalImageStores := d.AdditionalImageStores()
	if d.imageStore != "" {
-		imageStore = path.Join(d.imageStore, id)
+		additionalImageStores = append([]string{d.imageStore}, additionalImageStores...)
	}
+	return additionalImageStores
+}
+
+func (d *Driver) dir2(id string, useImageStore bool) (string, string, bool) {
+	var homedir string
+
+	if useImageStore && d.imageStore != "" {
+		homedir = path.Join(d.imageStore, d.name)
+	} else {
+		homedir = d.home
+	}
+
+	newpath := path.Join(homedir, id)
+
	if _, err := os.Stat(newpath); err != nil {
-		for _, p := range d.AdditionalImageStores() {
+		for _, p := range d.getAllImageStores() {
			l := path.Join(p, d.name, id)
			_, err = os.Stat(l)
			if err == nil {
-				return l, imageStore, true
+				return l, homedir, true
			}
		}
	}
-	return newpath, imageStore, false
+	return newpath, homedir, false
}
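The dir2/getAllImageStores pair above implements a simple search-path lookup: prefer the writable home directory, then fall back to the image stores. An illustrative standalone sketch, with hypothetical paths and names:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// findLayerDir returns the directory for a layer id and whether it was found
// in a fallback (typically read-only) store rather than the writable home.
func findLayerDir(home, driverName, id string, imageStores []string) (string, bool) {
	primary := filepath.Join(home, id)
	if _, err := os.Stat(primary); err == nil {
		return primary, false // found in the writable store
	}
	for _, store := range imageStores {
		p := filepath.Join(store, driverName, id)
		if _, err := os.Stat(p); err == nil {
			return p, true // found in an additional store
		}
	}
	return primary, false // caller sees ENOENT when it uses the path
}

func main() {
	dir, fromStore := findLayerDir("/var/lib/containers/storage/overlay", "overlay",
		"abc123", []string{"/usr/lib/containers/storage"})
	fmt.Println(dir, fromStore)
}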
func (d *Driver) getLowerDirs(id string) ([]string, error) {
@@ -1452,14 +1416,11 @@ func (d *Driver) Get(id string, options graphdriver.MountOpts) (string, error) {
}

func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {
-	dir, imageStore, inAdditionalStore := d.dir2(id)
+	dir, _, inAdditionalStore := d.dir2(id, false)
	if _, err := os.Stat(dir); err != nil {
		return "", err
	}
-	workDirBase := dir
-	if imageStore != "" {
-		workDirBase = imageStore
-	}

	readWrite := !inAdditionalStore

	if !d.SupportsShifting() || options.DisableShifting {
@@ -1564,7 +1525,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {
	}()

	composeFsLayers := []string{}
-	composeFsLayersDir := filepath.Join(workDirBase, "composefs-layers")
+	composeFsLayersDir := filepath.Join(dir, "composefs-layers")
	maybeAddComposefsMount := func(lowerID string, i int, readWrite bool) (string, error) {
		composefsBlob := d.getComposefsData(lowerID)
		_, err = os.Stat(composefsBlob)
@@ -1598,7 +1559,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {
		return dest, nil
	}

-	diffDir := path.Join(workDirBase, "diff")
+	diffDir := path.Join(dir, "diff")

	if dest, err := maybeAddComposefsMount(id, 0, readWrite); err != nil {
		return "", err
@@ -1616,7 +1577,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {
		lower := ""
		newpath := path.Join(d.home, l)
		if st, err := os.Stat(newpath); err != nil {
-			for _, p := range d.AdditionalImageStores() {
+			for _, p := range d.getAllImageStores() {
				lower = path.Join(p, d.name, l)
				if st2, err2 := os.Stat(lower); err2 == nil {
					if !permsKnown {
@@ -1684,21 +1645,27 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {
		optsList = append(optsList, "metacopy=on", "redirect_dir=on")
	}

-	if len(absLowers) == 0 {
-		absLowers = append(absLowers, path.Join(dir, "empty"))
-	}
-
	// user namespace requires this to move a directory from lower to upper.
	rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps)
	if err != nil {
		return "", err
	}

-	if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil {
-		return "", err
+	if len(absLowers) == 0 {
+		absLowers = append(absLowers, path.Join(dir, "empty"))
	}

-	mergedDir := path.Join(workDirBase, "merged")
+	if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil {
+		if !inAdditionalStore {
+			return "", err
+		}
+		// if it is in an additional store, do not fail if the directory already exists
+		if _, err2 := os.Stat(diffDir); err2 != nil {
+			return "", err
+		}
+	}
+
+	mergedDir := path.Join(dir, "merged")
	// Create the driver merged dir
	if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) {
		return "", err
@@ -1716,7 +1683,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {
		}
	}()

-	workdir := path.Join(workDirBase, "work")
+	workdir := path.Join(dir, "work")

	if d.options.mountProgram == "" && unshare.IsRootless() {
		optsList = append(optsList, "userxattr")
@@ -1866,7 +1833,7 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountOpts) (_ string, retErr error) {

// Put unmounts the mount path created for the given id.
func (d *Driver) Put(id string) error {
-	dir := d.dir(id)
+	dir, _, inAdditionalStore := d.dir2(id, false)
	if _, err := os.Stat(dir); err != nil {
		return err
	}
@@ -1927,11 +1894,27 @@ func (d *Driver) Put(id string) error {
		}
	}

-	if err := unix.Rmdir(mountpoint); err != nil && !os.IsNotExist(err) {
-		logrus.Debugf("Failed to remove mountpoint %s overlay: %s - %v", id, mountpoint, err)
-		return fmt.Errorf("removing mount point %q: %w", mountpoint, err)
-	}
+	if !inAdditionalStore {
+		uid, gid := int(0), int(0)
+		fi, err := os.Stat(mountpoint)
+		if err != nil {
+			return err
+		}
+		if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
+			uid, gid = int(stat.Uid), int(stat.Gid)
+		}
+
+		tmpMountpoint := path.Join(dir, "merged.1")
+		if err := idtools.MkdirAs(tmpMountpoint, 0o700, uid, gid); err != nil && !errors.Is(err, os.ErrExist) {
+			return err
+		}
+		// rename(2) can be used on an empty directory, as it is the mountpoint after umount, and it retains
+		// its atomic semantics. In this way the "merged" directory is never removed.
+		if err := unix.Rename(tmpMountpoint, mountpoint); err != nil {
+			logrus.Debugf("Failed to replace mountpoint %s overlay: %s - %v", id, mountpoint, err)
+			return fmt.Errorf("replacing mount point %q: %w", mountpoint, err)
+		}
+	}
	return nil
}
@ -2019,8 +2002,9 @@ func (g *overlayFileGetter) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *Driver) getStagingDir() string {
|
||||
return filepath.Join(d.home, stagingDir)
|
||||
func (d *Driver) getStagingDir(id string) string {
|
||||
_, homedir, _ := d.dir2(id, d.imageStore != "")
|
||||
return filepath.Join(homedir, stagingDir)
|
||||
}
|
||||
|
||||
// DiffGetter returns a FileGetCloser that can read files from the directory that
|
||||
@ -2077,15 +2061,22 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
|
||||
var applyDir string
|
||||
|
||||
if id == "" {
|
||||
err := os.MkdirAll(d.getStagingDir(), 0o700)
|
||||
stagingDir := d.getStagingDir(id)
|
||||
err := os.MkdirAll(stagingDir, 0o700)
|
||||
if err != nil && !os.IsExist(err) {
|
||||
return graphdriver.DriverWithDifferOutput{}, err
|
||||
}
|
||||
applyDir, err = os.MkdirTemp(d.getStagingDir(), "")
|
||||
applyDir, err = os.MkdirTemp(stagingDir, "")
|
||||
if err != nil {
|
||||
return graphdriver.DriverWithDifferOutput{}, err
|
||||
}
|
||||
|
||||
perms := defaultPerms
|
||||
if d.options.forceMask != nil {
|
||||
perms = *d.options.forceMask
|
||||
}
|
||||
if err := os.Chmod(applyDir, perms); err != nil {
|
||||
return graphdriver.DriverWithDifferOutput{}, err
|
||||
}
|
||||
} else {
|
||||
var err error
|
||||
applyDir, err = d.getDiffPath(id)
|
||||
@ -2101,6 +2092,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
|
||||
}
|
||||
if d.usingComposefs {
|
||||
differOptions.Format = graphdriver.DifferOutputFormatFlat
|
||||
differOptions.UseFsVerity = graphdriver.DifferFsVerityEnabled
|
||||
}
|
||||
out, err := differ.ApplyDiff(applyDir, &archive.TarOptions{
|
||||
UIDMaps: idMappings.UIDs(),
|
||||
@ -2116,27 +2108,38 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
|
||||
}
|
||||
|
||||
// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
|
||||
func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error {
|
||||
if filepath.Dir(stagingDirectory) != d.getStagingDir() {
|
||||
func (d *Driver) ApplyDiffFromStagingDirectory(id, parent string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error {
|
||||
stagingDirectory := diffOutput.Target
|
||||
if filepath.Dir(stagingDirectory) != d.getStagingDir(id) {
|
||||
return fmt.Errorf("%q is not a staging directory", stagingDirectory)
|
||||
}
|
||||
|
||||
if d.usingComposefs {
|
||||
// FIXME: move this logic into the differ so we don't have to open
|
||||
// the file twice.
|
||||
verityDigests, err := enableVerityRecursive(stagingDirectory)
|
||||
if err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) {
|
||||
logrus.Warningf("%s", err)
|
||||
}
|
||||
toc := diffOutput.Artifacts[tocArtifact]
|
||||
if err := generateComposeFsBlob(verityDigests, toc, d.getComposefsData(id)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
diffPath, err := d.getDiffPath(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If the current layer doesn't set the mode for the parent, override it with the parent layer's mode.
|
||||
if d.options.forceMask == nil && diffOutput.RootDirMode == nil && parent != "" {
|
||||
parentDiffPath, err := d.getDiffPath(parent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
parentSt, err := os.Stat(parentDiffPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Chmod(stagingDirectory, parentSt.Mode()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if d.usingComposefs {
|
||||
toc := diffOutput.Artifacts[tocArtifact]
|
||||
verityDigests := diffOutput.Artifacts[fsVerityDigestsArtifact].(map[string]string)
|
||||
if err := generateComposeFsBlob(verityDigests, toc, d.getComposefsData(id)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := os.RemoveAll(diffPath); err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
@ -2193,12 +2196,8 @@ func (d *Driver) getComposefsData(id string) string {
|
||||
}
|
||||
|
||||
func (d *Driver) getDiffPath(id string) (string, error) {
|
||||
dir, imagestore, _ := d.dir2(id)
|
||||
base := dir
|
||||
if imagestore != "" {
|
||||
base = imagestore
|
||||
}
|
||||
return redirectDiffIfAdditionalLayer(path.Join(base, "diff"))
|
||||
dir := d.dir(id)
|
||||
return redirectDiffIfAdditionalLayer(path.Join(dir, "diff"))
|
||||
}
|
||||
|
||||
func (d *Driver) getLowerDiffPaths(id string) ([]string, error) {
|
||||
@ -2289,12 +2288,8 @@ func (d *Driver) AdditionalImageStores() []string {
|
||||
// by toContainer to those specified by toHost.
|
||||
func (d *Driver) UpdateLayerIDMap(id string, toContainer, toHost *idtools.IDMappings, mountLabel string) error {
|
||||
var err error
|
||||
dir, imagestore, _ := d.dir2(id)
|
||||
base := dir
|
||||
if imagestore != "" {
|
||||
base = imagestore
|
||||
}
|
||||
diffDir := filepath.Join(base, "diff")
|
||||
dir := d.dir(id)
|
||||
diffDir := filepath.Join(dir, "diff")
|
||||
|
||||
rootUID, rootGID := 0, 0
|
||||
if toHost != nil {
|
||||
|
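The reworked Put() above no longer removes the "merged" mountpoint; it swaps in a freshly created empty directory via rename(2), which atomically replaces an empty target directory. A minimal standalone sketch of that trick, under the assumption of hypothetical /tmp paths (Go, golang.org/x/sys/unix):

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Hypothetical layout: "demo-merged" stands in for the empty
        // mountpoint left behind after umount; "demo-merged.1" is the
        // fresh empty replacement, like "merged.1" in the diff.
        if err := os.Mkdir("/tmp/demo-merged", 0o700); err != nil && !os.IsExist(err) {
            panic(err)
        }
        if err := os.Mkdir("/tmp/demo-merged.1", 0o700); err != nil && !os.IsExist(err) {
            panic(err)
        }
        // rename(2) over an existing *empty* directory succeeds and is
        // atomic, so the mountpoint always exists: it is replaced, never
        // removed.
        if err := unix.Rename("/tmp/demo-merged.1", "/tmp/demo-merged"); err != nil {
            panic(err)
        }
        fmt.Println("mountpoint replaced atomically")
    }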
13 vendor/github.com/containers/storage/drivers/overlay/overlay_nocgo.go generated vendored
@ -4,6 +4,7 @@
package overlay

import (
    "fmt"
    "path"

    "github.com/containers/storage/pkg/directory"
@ -15,3 +16,15 @@ import (
func (d *Driver) ReadWriteDiskUsage(id string) (*directory.DiskUsage, error) {
    return directory.Usage(path.Join(d.dir(id), "diff"))
}

func getComposeFsHelper() (string, error) {
    return "", fmt.Errorf("composefs not supported on this build")
}

func mountComposefsBlob(dataDir, mountPoint string) error {
    return fmt.Errorf("composefs not supported on this build")
}

func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error {
    return fmt.Errorf("composefs not supported on this build")
}
56 vendor/github.com/containers/storage/drivers/vfs/driver.go generated vendored
@ -31,8 +31,9 @@ func init() {
func Init(home string, options graphdriver.Options) (graphdriver.Driver, error) {
    d := &Driver{
        name: "vfs",
        homes: []string{home},
        home: home,
        idMappings: idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
        imageStore: options.ImageStore,
    }

    rootIDs := d.idMappings.RootPair()
@ -47,7 +48,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
        key = strings.ToLower(key)
        switch key {
        case "vfs.imagestore", ".imagestore":
            d.homes = append(d.homes, strings.Split(val, ",")...)
            d.additionalHomes = append(d.additionalHomes, strings.Split(val, ",")...)
            continue
        case "vfs.mountopt":
            return nil, fmt.Errorf("vfs driver does not support mount options")
@ -62,12 +63,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
            return nil, fmt.Errorf("vfs driver does not support %s options", key)
        }
    }
    // If --imagestore is provided, lets add writable graphRoot
    // to vfs's additional image store, as it is done for
    // `overlay` driver.
    if options.ImageStore != "" {
        d.homes = append(d.homes, options.ImageStore)
    }

    d.updater = graphdriver.NewNaiveLayerIDMapUpdater(d)
    d.naiveDiff = graphdriver.NewNaiveDiffDriver(d, d.updater)

@ -80,11 +76,13 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver
type Driver struct {
    name string
    homes []string
    home string
    additionalHomes []string
    idMappings *idtools.IDMappings
    ignoreChownErrors bool
    naiveDiff graphdriver.DiffDriver
    updater graphdriver.LayerIDMapUpdater
    imageStore string
}

func (d *Driver) String() string {
@ -158,7 +156,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
        idMappings = opts.IDMappings
    }

    dir := d.dir(id)
    dir := d.dir2(id, ro)
    rootIDs := idMappings.RootPair()
    if err := idtools.MkdirAllAndChown(filepath.Dir(dir), 0o700, rootIDs); err != nil {
        return err
@ -204,18 +202,32 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
    return nil
}

func (d *Driver) dir(id string) string {
    for i, home := range d.homes {
        if i > 0 {
            home = filepath.Join(home, d.String())
func (d *Driver) dir2(id string, useImageStore bool) string {
    var homedir string

    if useImageStore && d.imageStore != "" {
        homedir = filepath.Join(d.imageStore, d.String(), "dir", filepath.Base(id))
    } else {
        homedir = filepath.Join(d.home, "dir", filepath.Base(id))
    }
    if _, err := os.Stat(homedir); err != nil {
        additionalHomes := d.additionalHomes[:]
        if d.imageStore != "" {
            additionalHomes = append(additionalHomes, d.imageStore)
        }
        candidate := filepath.Join(home, "dir", filepath.Base(id))
        fi, err := os.Stat(candidate)
        if err == nil && fi.IsDir() {
            return candidate
        for _, home := range additionalHomes {
            candidate := filepath.Join(home, d.String(), "dir", filepath.Base(id))
            fi, err := os.Stat(candidate)
            if err == nil && fi.IsDir() {
                return candidate
            }
        }
    }
    return filepath.Join(d.homes[0], "dir", filepath.Base(id))
    return homedir
}

func (d *Driver) dir(id string) string {
    return d.dir2(id, false)
}

// Remove deletes the content from the directory for a given id.
@ -263,7 +275,7 @@ func (d *Driver) Exists(id string) bool {

// List layers (not including additional image stores)
func (d *Driver) ListLayers() ([]string, error) {
    entries, err := os.ReadDir(filepath.Join(d.homes[0], "dir"))
    entries, err := os.ReadDir(filepath.Join(d.home, "dir"))
    if err != nil {
        return nil, err
    }
@ -285,8 +297,8 @@ func (d *Driver) ListLayers() ([]string, error) {

// AdditionalImageStores returns additional image stores supported by the driver
func (d *Driver) AdditionalImageStores() []string {
    if len(d.homes) > 1 {
        return d.homes[1:]
    if len(d.additionalHomes) > 0 {
        return d.additionalHomes
    }
    return nil
}
126 vendor/github.com/containers/storage/layers.go generated vendored
@ -181,6 +181,13 @@ type DiffOptions struct {
    Compression *archive.Compression
}

// stagedLayerOptions are the options passed to .create to populate a staged
// layer
type stagedLayerOptions struct {
    DiffOutput *drivers.DriverWithDifferOutput
    DiffOptions *drivers.ApplyDiffWithDifferOpts
}

// roLayerStore wraps a graph driver, adding the ability to refer to layers by
// name, and keeping track of parent-child relationships, along with a list of
// all known layers.
@ -267,7 +274,7 @@ type rwLayerStore interface {
    // underlying drivers do not themselves distinguish between writeable
    // and read-only layers. Returns the new layer structure and the size of the
    // diff which was applied to its parent to initialize its contents.
    create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (*Layer, int64, error)
    create(id string, parent *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error)

    // updateNames modifies names associated with a layer based on (op, names).
    updateNames(id string, names []string, op updateNameOperation) error
@ -312,8 +319,8 @@ type rwLayerStore interface {
    // CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors
    CleanupStagingDirectory(stagingDirectory string) error

    // ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
    ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error
    // applyDiffFromStagingDirectory uses diffOutput.Target to create the diff.
    applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error

    // DifferTarget gets the location where files are stored for the layer.
    DifferTarget(id string) (string, error)
@ -327,10 +334,71 @@ type rwLayerStore interface {
    GarbageCollect() error
}

type multipleLockFile struct {
    lockfiles []*lockfile.LockFile
}

func (l multipleLockFile) Lock() {
    for _, lock := range l.lockfiles {
        lock.Lock()
    }
}

func (l multipleLockFile) RLock() {
    for _, lock := range l.lockfiles {
        lock.RLock()
    }
}

func (l multipleLockFile) Unlock() {
    for _, lock := range l.lockfiles {
        lock.Unlock()
    }
}

func (l multipleLockFile) ModifiedSince(lastWrite lockfile.LastWrite) (lockfile.LastWrite, bool, error) {
    // Look up only the first lockfile, since this is the value returned by RecordWrite().
    return l.lockfiles[0].ModifiedSince(lastWrite)
}

func (l multipleLockFile) AssertLockedForWriting() {
    for _, lock := range l.lockfiles {
        lock.AssertLockedForWriting()
    }
}

func (l multipleLockFile) GetLastWrite() (lockfile.LastWrite, error) {
    return l.lockfiles[0].GetLastWrite()
}

func (l multipleLockFile) RecordWrite() (lockfile.LastWrite, error) {
    var lastWrite *lockfile.LastWrite
    for _, lock := range l.lockfiles {
        lw, err := lock.RecordWrite()
        if err != nil {
            return lw, err
        }
        // Return the first value we get so we know that
        // all the locks have a write time >= to this one.
        if lastWrite == nil {
            lastWrite = &lw
        }
    }
    return *lastWrite, nil
}

func (l multipleLockFile) IsReadWrite() bool {
    return l.lockfiles[0].IsReadWrite()
}

func newMultipleLockFile(l ...*lockfile.LockFile) *multipleLockFile {
    return &multipleLockFile{lockfiles: l}
}
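For context, a hedged sketch of how the new multipleLockFile type might be driven from inside the storage package: one lockfile per store, all taken as a unit. The helper name and paths are hypothetical; this mirrors what newLayerStore does below with layerdir and imagedir.

    // Hypothetical package-internal helper: take a write lock that spans
    // the graph root and one image store (assumes lockfile and filepath
    // are imported, as elsewhere in layers.go).
    func lockBothStores(layerdir, imagedir string) (*multipleLockFile, error) {
        graphLock, err := lockfile.GetLockFile(filepath.Join(layerdir, "layers.lock"))
        if err != nil {
            return nil, err
        }
        imageLock, err := lockfile.GetLockFile(filepath.Join(imagedir, "layers.lock"))
        if err != nil {
            return nil, err
        }
        locks := newMultipleLockFile(graphLock, imageLock)
        locks.Lock() // acquires every underlying lockfile, in order
        return locks, nil
    }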
type layerStore struct {
    // The following fields are only set when constructing layerStore, and must never be modified afterwards.
    // They are safe to access without any other locking.
    lockfile *lockfile.LockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores.
    lockfile *multipleLockFile // lockfile.IsReadWrite can be used to distinguish between read-write and read-only layer stores.
    mountsLockfile *lockfile.LockFile // Can _only_ be obtained with inProcessLock held.
    rundir string
    jsonPath [numLayerLocationIndex]string
@ -1016,22 +1084,37 @@ func (r *layerStore) saveMounts() error {
    return r.loadMounts()
}

func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Driver, transient bool) (rwLayerStore, error) {
func (s *store) newLayerStore(rundir, layerdir, imagedir string, driver drivers.Driver, transient bool) (rwLayerStore, error) {
    if err := os.MkdirAll(rundir, 0o700); err != nil {
        return nil, err
    }
    if err := os.MkdirAll(layerdir, 0o700); err != nil {
        return nil, err
    }
    if imagedir != "" {
        if err := os.MkdirAll(imagedir, 0o700); err != nil {
            return nil, err
        }
    }
    // Note: While the containers.lock file is in rundir for transient stores
    // we don't want to do this here, because the non-transient layers in
    // layers.json might be used externally as a read-only layer (using e.g.
    // additionalimagestores), and that would look for the lockfile in the
    // same directory
    var lockFiles []*lockfile.LockFile
    lockFile, err := lockfile.GetLockFile(filepath.Join(layerdir, "layers.lock"))
    if err != nil {
        return nil, err
    }
    lockFiles = append(lockFiles, lockFile)
    if imagedir != "" {
        lockFile, err := lockfile.GetLockFile(filepath.Join(imagedir, "layers.lock"))
        if err != nil {
            return nil, err
        }
        lockFiles = append(lockFiles, lockFile)
    }

    mountsLockfile, err := lockfile.GetLockFile(filepath.Join(rundir, "mountpoints.lock"))
    if err != nil {
        return nil, err
@ -1041,7 +1124,7 @@ func (s *store) newLayerStore(rundir string, layerdir string, driver drivers.Dri
        volatileDir = rundir
    }
    rlstore := layerStore{
        lockfile: lockFile,
        lockfile: newMultipleLockFile(lockFiles...),
        mountsLockfile: mountsLockfile,
        rundir: rundir,
        jsonPath: [numLayerLocationIndex]string{
@ -1078,7 +1161,7 @@ func newROLayerStore(rundir string, layerdir string, driver drivers.Driver) (roL
        return nil, err
    }
    rlstore := layerStore{
        lockfile: lockfile,
        lockfile: newMultipleLockFile(lockfile),
        mountsLockfile: nil,
        rundir: rundir,
        jsonPath: [numLayerLocationIndex]string{
@ -1232,7 +1315,7 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
}

// Requires startWriting.
func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader) (layer *Layer, size int64, err error) {
func (r *layerStore) create(id string, parentLayer *Layer, names []string, mountLabel string, options map[string]string, moreOptions *LayerOptions, writeable bool, diff io.Reader, slo *stagedLayerOptions) (layer *Layer, size int64, err error) {
    if moreOptions == nil {
        moreOptions = &LayerOptions{}
    }
@ -1426,6 +1509,11 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
            cleanupFailureContext = "applying layer diff"
            return nil, -1, err
        }
    } else if slo != nil {
        if err := r.applyDiffFromStagingDirectory(layer.ID, slo.DiffOutput, slo.DiffOptions); err != nil {
            cleanupFailureContext = "applying staged directory diff"
            return nil, -1, err
        }
    } else {
        // applyDiffWithOptions() would have updated r.bycompressedsum
        // and r.byuncompressedsum for us, but if we used a template
@ -2286,7 +2374,7 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
    if layerOptions != nil && layerOptions.UncompressedDigest != "" &&
        layerOptions.UncompressedDigest.Algorithm() == digest.Canonical {
        uncompressedDigest = layerOptions.UncompressedDigest
    } else {
    } else if compression != archive.Uncompressed {
        uncompressedDigester = digest.Canonical.Digester()
    }

@ -2365,10 +2453,17 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
    if uncompressedDigester != nil {
        uncompressedDigest = uncompressedDigester.Digest()
    }
    if uncompressedDigest == "" && compression == archive.Uncompressed {
        uncompressedDigest = compressedDigest
    }

    updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest, layer.ID)
    layer.CompressedDigest = compressedDigest
    layer.CompressedSize = compressedCounter.Count
    if layerOptions != nil && layerOptions.OriginalDigest != "" && layerOptions.OriginalSize != nil {
        layer.CompressedSize = *layerOptions.OriginalSize
    } else {
        layer.CompressedSize = compressedCounter.Count
    }
    updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, uncompressedDigest, layer.ID)
    layer.UncompressedDigest = uncompressedDigest
    layer.UncompressedSize = uncompressedCounter.Count
@ -2407,7 +2502,7 @@ func (r *layerStore) DifferTarget(id string) (string, error) {
}

// Requires startWriting.
func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error {
func (r *layerStore) applyDiffFromStagingDirectory(id string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error {
    ddriver, ok := r.driver.(drivers.DriverWithDiffer)
    if !ok {
        return ErrNotSupported
@ -2426,7 +2521,7 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
        }
    }

    err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, stagingDirectory, diffOutput, options)
    err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, diffOutput, options)
    if err != nil {
        return err
    }
@ -2446,6 +2541,10 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
            layer.Flags[k] = v
        }
    }
    if err = r.saveFor(layer); err != nil {
        return err
    }

    if len(diffOutput.TarSplit) != 0 {
        tsdata := bytes.Buffer{}
        compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
@ -2475,9 +2574,6 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
            return err
        }
    }
    if err = r.saveFor(layer); err != nil {
        return err
    }
    return err
}
35 vendor/github.com/containers/storage/pkg/archive/archive.go generated vendored
@ -339,12 +339,43 @@ func (compression *Compression) Extension() string {
    return ""
}

// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to
// prevent tar.FileInfoHeader from introspecting it and potentially calling into
// glibc.
type nosysFileInfo struct {
    os.FileInfo
}

func (fi nosysFileInfo) Sys() interface{} {
    // A Sys value of type *tar.Header is safe as it is system-independent.
    // The tar.FileInfoHeader function copies the fields into the returned
    // header without performing any OS lookups.
    if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok {
        return sys
    }
    return nil
}

// sysStatOverride, if non-nil, populates hdr from system-dependent fields of fi.
var sysStatOverride func(fi os.FileInfo, hdr *tar.Header) error

func fileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) {
    if sysStatOverride == nil {
        return tar.FileInfoHeader(fi, link)
    }
    hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link)
    if err != nil {
        return nil, err
    }
    return hdr, sysStatOverride(fi, hdr)
}

// FileInfoHeader creates a populated Header from fi.
// Compared to archive pkg this function fills in more information.
// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
// which have been deleted since Go 1.9 archive/tar.
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
    hdr, err := tar.FileInfoHeader(fi, link)
    hdr, err := fileInfoHeaderNoLookups(fi, link)
    if err != nil {
        return nil, err
    }
@ -385,7 +416,7 @@ func ReadUserXattrToTarHeader(path string, hdr *tar.Header) error {
        return err
    }
    for _, key := range xattrs {
        if strings.HasPrefix(key, "user.") {
        if strings.HasPrefix(key, "user.") && !strings.HasPrefix(key, "user.overlay.") {
            value, err := system.Lgetxattr(path, key)
            if err != nil {
                if errors.Is(err, system.E2BIG) {
25 vendor/github.com/containers/storage/pkg/archive/archive_unix.go generated vendored
@ -15,6 +15,31 @@ import (
    "golang.org/x/sys/unix"
)

func init() {
    sysStatOverride = statUnix
}

// statUnix populates hdr from system-dependent fields of fi without performing
// any OS lookups.
// Adapted from Moby.
func statUnix(fi os.FileInfo, hdr *tar.Header) error {
    s, ok := fi.Sys().(*syscall.Stat_t)
    if !ok {
        return nil
    }

    hdr.Uid = int(s.Uid)
    hdr.Gid = int(s.Gid)

    if s.Mode&unix.S_IFBLK != 0 ||
        s.Mode&unix.S_IFCHR != 0 {
        hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert
        hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert
    }

    return nil
}

// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
26 vendor/github.com/containers/storage/pkg/chunked/cache_linux.go generated vendored
@ -25,7 +25,7 @@ import (

const (
    cacheKey = "chunked-manifest-cache"
    cacheVersion = 1
    cacheVersion = 2

    digestSha256Empty = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
)
@ -207,9 +207,9 @@ func calculateHardLinkFingerprint(f *internal.FileMetadata) (string, error) {
    return string(digester.Digest()), nil
}

// generateFileLocation generates a file location in the form $OFFSET@$PATH
func generateFileLocation(path string, offset uint64) []byte {
    return []byte(fmt.Sprintf("%d@%s", offset, path))
// generateFileLocation generates a file location in the form $OFFSET:$LEN:$PATH
func generateFileLocation(path string, offset, len uint64) []byte {
    return []byte(fmt.Sprintf("%d:%d:%s", offset, len, path))
}

// generateTag generates a tag in the form $DIGEST$OFFSET@LEN.
@ -245,7 +245,7 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin
    var tags []string
    for _, k := range toc {
        if k.Digest != "" {
            location := generateFileLocation(k.Name, 0)
            location := generateFileLocation(k.Name, 0, uint64(k.Size))

            off := uint64(vdata.Len())
            l := uint64(len(location))
@ -276,7 +276,7 @@ func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id strin
            digestLen = len(k.Digest)
        }
        if k.ChunkDigest != "" {
            location := generateFileLocation(k.Name, uint64(k.ChunkOffset))
            location := generateFileLocation(k.Name, uint64(k.ChunkOffset), uint64(k.ChunkSize))
            off := uint64(vdata.Len())
            l := uint64(len(location))
            d := generateTag(k.ChunkDigest, off, l)
@ -490,7 +490,9 @@ func findTag(digest string, metadata *metadata) (string, uint64, uint64) {
        if digest == d {
            startOff := i*metadata.tagLen + metadata.digestLen
            parts := strings.Split(string(metadata.tags[startOff:(i+1)*metadata.tagLen]), "@")

            off, _ := strconv.ParseInt(parts[0], 10, 64)

            len, _ := strconv.ParseInt(parts[1], 10, 64)
            return digest, uint64(off), uint64(len)
        }
@ -507,12 +509,16 @@ func (c *layersCache) findDigestInternal(digest string) (string, string, int64,
    defer c.mutex.RUnlock()

    for _, layer := range c.layers {
        digest, off, len := findTag(digest, layer.metadata)
        digest, off, tagLen := findTag(digest, layer.metadata)
        if digest != "" {
            position := string(layer.metadata.vdata[off : off+len])
            parts := strings.SplitN(position, "@", 2)
            position := string(layer.metadata.vdata[off : off+tagLen])
            parts := strings.SplitN(position, ":", 3)
            if len(parts) != 3 {
                continue
            }
            offFile, _ := strconv.ParseInt(parts[0], 10, 64)
            return layer.target, parts[1], offFile, nil
            // parts[1] is the chunk length, currently unused.
            return layer.target, parts[2], offFile, nil
        }
    }
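The cache entry payload moves from $OFFSET@$PATH to $OFFSET:$LEN:$PATH, which is why cacheVersion is bumped to 2. A self-contained sketch of encoding and decoding the new format, mirroring generateFileLocation and the parsing in findDigestInternal (function names here are illustrative, not from the library):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // encodeLocation mirrors generateFileLocation: $OFFSET:$LEN:$PATH.
    func encodeLocation(path string, offset, length uint64) string {
        return fmt.Sprintf("%d:%d:%s", offset, length, path)
    }

    // decodeLocation mirrors findDigestInternal; SplitN with a limit of 3
    // keeps any ':' characters inside the path itself intact.
    func decodeLocation(location string) (offset, length uint64, path string, err error) {
        parts := strings.SplitN(location, ":", 3)
        if len(parts) != 3 {
            return 0, 0, "", fmt.Errorf("malformed location %q", location)
        }
        if offset, err = strconv.ParseUint(parts[0], 10, 64); err != nil {
            return 0, 0, "", err
        }
        if length, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
            return 0, 0, "", err
        }
        return offset, length, parts[2], nil
    }

    func main() {
        loc := encodeLocation("usr/bin/skopeo", 4096, 123)
        off, l, p, err := decodeLocation(loc)
        fmt.Println(loc, off, l, p, err) // 4096:123:usr/bin/skopeo 4096 123 usr/bin/skopeo <nil>
    }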
4 vendor/github.com/containers/storage/pkg/chunked/compression_linux.go generated vendored
@ -257,8 +257,8 @@ func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, ann
    return decodedBlob, decodedTarSplit, int64(footerData.Offset), err
}

func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedUncompressedChecksum string) ([]byte, error) {
    d, err := digest.Parse(expectedUncompressedChecksum)
func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedCompressedChecksum string) ([]byte, error) {
    d, err := digest.Parse(expectedCompressedChecksum)
    if err != nil {
        return nil, err
    }
24 vendor/github.com/containers/storage/pkg/chunked/dump/dump.go generated vendored
@ -4,6 +4,7 @@ import (
    "bufio"
    "fmt"
    "io"
    "path/filepath"
    "strings"
    "time"
    "unicode"
@ -93,13 +94,18 @@ func getStMode(mode uint32, typ string) (uint32, error) {
    return mode, nil
}

func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error {
    path := strings.TrimRight(entry.Name, "/")
    if path == "" {
func sanitizeName(name string) string {
    path := filepath.Clean(name)
    if path == "." {
        path = "/"
    } else if path[0] != '/' {
        path = "/" + path
    }
    return path
}

func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error {
    path := sanitizeName(entry.Name)

    if _, err := fmt.Fprint(out, escaped(path, ESCAPE_STANDARD)); err != nil {
        return err
@ -133,9 +139,10 @@ func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]stri

    var payload string
    if entry.Linkname != "" {
        payload = entry.Linkname
        if entry.Type == internal.TypeLink && payload[0] != '/' {
            payload = "/" + payload
        if entry.Type == internal.TypeSymlink {
            payload = entry.Linkname
        } else {
            payload = sanitizeName(entry.Linkname)
        }
    } else {
        if len(entry.Digest) > 10 {
@ -198,10 +205,13 @@ func GenerateDump(tocI interface{}, verityDigests map[string]string) (io.Reader,
        if e.Linkname == "" {
            continue
        }
        if e.Type == internal.TypeSymlink {
            continue
        }
        links[e.Linkname] = links[e.Linkname] + 1
    }

    if len(toc.Entries) == 0 || (toc.Entries[0].Name != "" && toc.Entries[0].Name != "/") {
    if len(toc.Entries) == 0 || (sanitizeName(toc.Entries[0].Name) != "/") {
        root := &internal.FileMetadata{
            Name: "/",
            Type: internal.TypeDir,
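sanitizeName normalizes TOC entry names so every dumped path is Clean-ed and absolute. A quick standalone illustration of the mapping, copying the same logic out of context:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    // sanitizeName as introduced above: Clean the path, map "." to "/",
    // and force a leading slash.
    func sanitizeName(name string) string {
        path := filepath.Clean(name)
        if path == "." {
            path = "/"
        } else if path[0] != '/' {
            path = "/" + path
        }
        return path
    }

    func main() {
        for _, in := range []string{"", ".", "./etc/passwd", "etc//passwd", "/etc/passwd/"} {
            fmt.Printf("%q -> %q\n", in, sanitizeName(in))
        }
        // "" -> "/", "." -> "/", "./etc/passwd" -> "/etc/passwd",
        // "etc//passwd" -> "/etc/passwd", "/etc/passwd/" -> "/etc/passwd"
    }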
194 vendor/github.com/containers/storage/pkg/chunked/storage_linux.go generated vendored
@ -25,6 +25,7 @@ import (
    "github.com/containers/storage/pkg/archive"
    "github.com/containers/storage/pkg/chunked/compressor"
    "github.com/containers/storage/pkg/chunked/internal"
    "github.com/containers/storage/pkg/fsverity"
    "github.com/containers/storage/pkg/idtools"
    "github.com/containers/storage/pkg/system"
    "github.com/containers/storage/types"
@ -46,6 +47,7 @@ const (
    chunkedData = "zstd-chunked-data"
    chunkedLayerDataKey = "zstd-chunked-layer-data"
    tocKey = "toc"
    fsVerityDigestsKey = "fs-verity-digests"

    fileTypeZstdChunked = iota
    fileTypeEstargz
@ -71,11 +73,9 @@ type chunkedDiffer struct {
    zstdReader *zstd.Decoder
    rawReader io.Reader

    // contentDigest is the digest of the uncompressed content
    // (diffID) when the layer is fully retrieved. If the layer
    // is not fully retrieved, instead of using the digest of the
    // uncompressed content, it refers to the digest of the TOC.
    contentDigest digest.Digest
    // tocDigest is the digest of the TOC document when the layer
    // is partially pulled.
    tocDigest digest.Digest

    // convertedToZstdChunked is set to true if the layer needs to
    // be converted to the zstd:chunked format before it can be
@ -94,6 +94,10 @@ type chunkedDiffer struct {
    blobSize int64

    storeOpts *types.StoreOptions

    useFsVerity graphdriver.DifferFsVerity
    fsVerityDigests map[string]string
    fsVerityMutex sync.Mutex
}

var xattrsToIgnore = map[string]interface{}{
@ -237,6 +241,10 @@ func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Diges
        return nil, err
    }

    if !parseBooleanPullOption(&storeOpts, "enable_partial_images", true) {
        return nil, errors.New("enable_partial_images not configured")
    }

    _, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey]
    _, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation]

@ -265,6 +273,7 @@ func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDige
    }

    return &chunkedDiffer{
        fsVerityDigests: make(map[string]string),
        blobDigest: blobDigest,
        blobSize: blobSize,
        convertToZstdChunked: true,
@ -285,22 +294,23 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in
        return nil, err
    }

    contentDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey])
    tocDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey])
    if err != nil {
        return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[internal.ManifestChecksumKey], err)
    }

    return &chunkedDiffer{
        blobSize: blobSize,
        contentDigest: contentDigest,
        copyBuffer: makeCopyBuffer(),
        fileType: fileTypeZstdChunked,
        layersCache: layersCache,
        manifest: manifest,
        storeOpts: storeOpts,
        stream: iss,
        tarSplit: tarSplit,
        tocOffset: tocOffset,
        fsVerityDigests: make(map[string]string),
        blobSize: blobSize,
        tocDigest: tocDigest,
        copyBuffer: makeCopyBuffer(),
        fileType: fileTypeZstdChunked,
        layersCache: layersCache,
        manifest: manifest,
        storeOpts: storeOpts,
        stream: iss,
        tarSplit: tarSplit,
        tocOffset: tocOffset,
    }, nil
}

@ -314,21 +324,22 @@ func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize
        return nil, err
    }

    contentDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
    tocDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation])
    if err != nil {
        return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[estargz.TOCJSONDigestAnnotation], err)
    }

    return &chunkedDiffer{
        blobSize: blobSize,
        contentDigest: contentDigest,
        copyBuffer: makeCopyBuffer(),
        fileType: fileTypeEstargz,
        layersCache: layersCache,
        manifest: manifest,
        storeOpts: storeOpts,
        stream: iss,
        tocOffset: tocOffset,
        fsVerityDigests: make(map[string]string),
        blobSize: blobSize,
        tocDigest: tocDigest,
        copyBuffer: makeCopyBuffer(),
        fileType: fileTypeEstargz,
        layersCache: layersCache,
        manifest: manifest,
        storeOpts: storeOpts,
        stream: iss,
        tocOffset: tocOffset,
    }, nil
}

@ -925,6 +936,8 @@ func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileT
    return nil
}

type recordFsVerityFunc func(string, *os.File) error

type destinationFile struct {
    digester digest.Digester
    dirfd int
@ -934,9 +947,10 @@ type destinationFile struct {
    options *archive.TarOptions
    skipValidation bool
    to io.Writer
    recordFsVerity recordFsVerityFunc
}

func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions, skipValidation bool) (*destinationFile, error) {
func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions, skipValidation bool, recordFsVerity recordFsVerityFunc) (*destinationFile, error) {
    file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0)
    if err != nil {
        return nil, err
@ -963,15 +977,32 @@ func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *ar
        options: options,
        dirfd: dirfd,
        skipValidation: skipValidation,
        recordFsVerity: recordFsVerity,
    }, nil
}

func (d *destinationFile) Close() (Err error) {
    defer func() {
        err := d.file.Close()
        var roFile *os.File
        var err error

        if d.recordFsVerity != nil {
            roFile, err = reopenFileReadOnly(d.file)
            if err == nil {
                defer roFile.Close()
            } else if Err == nil {
                Err = err
            }
        }

        err = d.file.Close()
        if Err == nil {
            Err = err
        }

        if Err == nil && roFile != nil {
            Err = d.recordFsVerity(d.metadata.Name, roFile)
        }
    }()

    if !d.skipValidation {
@ -994,6 +1025,35 @@ func closeDestinationFiles(files chan *destinationFile, errors chan error) {
    close(errors)
}

func (c *chunkedDiffer) recordFsVerity(path string, roFile *os.File) error {
    if c.useFsVerity == graphdriver.DifferFsVerityDisabled {
        return nil
    }
    // fsverity.EnableVerity doesn't return an error if fs-verity was already
    // enabled on the file.
    err := fsverity.EnableVerity(path, int(roFile.Fd()))
    if err != nil {
        if c.useFsVerity == graphdriver.DifferFsVerityRequired {
            return err
        }

        // If it is not required, ignore the error if the filesystem does not support it.
        if errors.Is(err, unix.ENOTSUP) || errors.Is(err, unix.ENOTTY) {
            return nil
        }
    }
    verity, err := fsverity.MeasureVerity(path, int(roFile.Fd()))
    if err != nil {
        return err
    }

    c.fsVerityMutex.Lock()
    c.fsVerityDigests[path] = verity
    c.fsVerityMutex.Unlock()

    return nil
}

func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan error, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) (Err error) {
    var destFile *destinationFile

@ -1081,7 +1141,11 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan
            }
            filesToClose <- destFile
        }
        destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation)
        recordFsVerity := c.recordFsVerity
        if c.useFsVerity == graphdriver.DifferFsVerityDisabled {
            recordFsVerity = nil
        }
        destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation, recordFsVerity)
        if err != nil {
            Err = err
            goto exit
@ -1412,15 +1476,39 @@ type findAndCopyFileOptions struct {
    options *archive.TarOptions
}

func reopenFileReadOnly(f *os.File) (*os.File, error) {
    path := fmt.Sprintf("/proc/self/fd/%d", f.Fd())
    fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
    if err != nil {
        return nil, err
    }
    return os.NewFile(uintptr(fd), f.Name()), nil
}

func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, copyOptions *findAndCopyFileOptions, mode os.FileMode) (bool, error) {
    finalizeFile := func(dstFile *os.File) error {
        if dstFile != nil {
            defer dstFile.Close()
            if err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false); err != nil {
                return err
            }
        if dstFile == nil {
            return nil
        }
        return nil
        err := setFileAttrs(dirfd, dstFile, mode, r, copyOptions.options, false)
        if err != nil {
            dstFile.Close()
            return err
        }
        var roFile *os.File
        if c.useFsVerity != graphdriver.DifferFsVerityDisabled {
            roFile, err = reopenFileReadOnly(dstFile)
        }
        dstFile.Close()
        if err != nil {
            return err
        }
        if roFile == nil {
            return nil
        }

        defer roFile.Close()
        return c.recordFsVerity(r.Name, roFile)
    }

    found, dstFile, _, err := findFileInOtherLayers(c.layersCache, r, dirfd, copyOptions.useHardLinks)
@ -1522,9 +1610,13 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
        }
    }()

    c.useFsVerity = differOpts.UseFsVerity

    // stream to use for reading the zstd:chunked or Estargz file.
    stream := c.stream

    var uncompressedDigest digest.Digest

    if c.convertToZstdChunked {
        fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
        if err != nil {
@ -1575,13 +1667,12 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
        c.fileType = fileTypeZstdChunked
        c.manifest = manifest
        c.tarSplit = tarSplit

        // since we retrieved the whole file and it was validated, use the diffID instead of the TOC digest.
        c.contentDigest = diffID
        c.tocOffset = tocOffset

        // the file was generated by us and the digest for each file was already computed, no need to validate it again.
        c.skipValidation = true
        // since we retrieved the whole file and it was validated, set the uncompressed digest.
        uncompressedDigest = diffID
    }

    lcd := chunkedLayerData{
@ -1610,11 +1701,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
        Artifacts: map[string]interface{}{
            tocKey: toc,
        },
        TOCDigest: c.contentDigest,
    }

    if !parseBooleanPullOption(c.storeOpts, "enable_partial_images", false) {
        return output, errors.New("enable_partial_images not configured")
        TOCDigest: c.tocDigest,
        UncompressedDigest: uncompressedDigest,
    }

    // When the hard links deduplication is used, file attributes are ignored because setting them
@ -1731,13 +1819,17 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff

    mode := os.FileMode(r.Mode)

    r.Name = filepath.Clean(r.Name)
    r.Linkname = filepath.Clean(r.Linkname)

    t, err := typeToTarType(r.Type)
    if err != nil {
        return output, err
    }

    r.Name = filepath.Clean(r.Name)
    // do not modify the value of symlinks
    if r.Linkname != "" && t != tar.TypeSymlink {
        r.Linkname = filepath.Clean(r.Linkname)
    }

    if whiteoutConverter != nil {
        hdr := archivetar.Header{
            Typeflag: t,
@ -1783,6 +1875,9 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
        }

    case tar.TypeDir:
        if r.Name == "" || r.Name == "." {
            output.RootDirMode = &mode
        }
        if err := safeMkdir(dirfd, mode, r.Name, &r, options); err != nil {
            return output, err
        }
@ -1920,6 +2015,8 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
        logrus.Debugf("Missing %d bytes out of %d (%.2f %%)", missingPartsSize, totalChunksSize, float32(missingPartsSize*100.0)/float32(totalChunksSize))
    }

    output.Artifacts[fsVerityDigestsKey] = c.fsVerityDigests

    return output, nil
}

@ -1979,7 +2076,10 @@ func (c *chunkedDiffer) mergeTocEntries(fileType compressedFileType, entries []i

        e.Chunks = make([]*internal.FileMetadata, nChunks+1)
        for j := 0; j <= nChunks; j++ {
            e.Chunks[j] = &entries[i+j]
            // we need a copy here, otherwise we overwrite the
            // .Size later
            copy := entries[i+j]
            e.Chunks[j] = &copy
            e.EndOffset = entries[i+j].EndOffset
        }
        i += nChunks
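FS_IOC_ENABLE_VERITY only works on a read-only descriptor, so reopenFileReadOnly above re-opens an already-written file through /proc/self/fd without re-resolving its path on disk, which also works for nameless O_TMPFILE files. A standalone sketch of the same technique (Linux-only; the helper name is illustrative):

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    // reopenReadOnly mirrors reopenFileReadOnly: the magic /proc/self/fd
    // symlink yields a fresh O_RDONLY descriptor for an already-open file.
    func reopenReadOnly(f *os.File) (*os.File, error) {
        path := fmt.Sprintf("/proc/self/fd/%d", f.Fd())
        fd, err := unix.Open(path, unix.O_RDONLY|unix.O_CLOEXEC, 0)
        if err != nil {
            return nil, err
        }
        return os.NewFile(uintptr(fd), f.Name()), nil
    }

    func main() {
        rw, err := os.CreateTemp("", "verity-demo-")
        if err != nil {
            panic(err)
        }
        defer os.Remove(rw.Name())
        defer rw.Close()

        ro, err := reopenReadOnly(rw)
        if err != nil {
            panic(err)
        }
        defer ro.Close()
        fmt.Println("read-only descriptor:", ro.Fd())
    }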
19 vendor/github.com/containers/storage/pkg/chunked/toc/toc.go generated vendored
@ -1,6 +1,8 @@
package toc

import (
    "errors"

    "github.com/containers/storage/pkg/chunked/internal"
    digest "github.com/opencontainers/go-digest"
)
@ -16,19 +18,24 @@ const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
// table of contents (TOC) from the image's annotations.
// This is an experimental feature and may be changed/removed in the future.
func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
    if contentDigest, ok := annotations[tocJSONDigestAnnotation]; ok {
        d, err := digest.Parse(contentDigest)
    d1, ok1 := annotations[tocJSONDigestAnnotation]
    d2, ok2 := annotations[internal.ManifestChecksumKey]
    switch {
    case ok1 && ok2:
        return nil, errors.New("both zstd:chunked and eStargz TOC found")
    case ok1:
        d, err := digest.Parse(d1)
        if err != nil {
            return nil, err
        }
        return &d, nil
    }
    if contentDigest, ok := annotations[internal.ManifestChecksumKey]; ok {
        d, err := digest.Parse(contentDigest)
    case ok2:
        d, err := digest.Parse(d2)
        if err != nil {
            return nil, err
        }
        return &d, nil
    default:
        return nil, nil
    }
    return nil, nil
}
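A hedged usage sketch of the reworked GetTOCDigest: with a single TOC annotation it parses and returns the digest; with both the eStargz and zstd:chunked keys set it now fails instead of silently preferring one. The eStargz annotation string is the one named in this diff; the literal value of the zstd:chunked key (internal.ManifestChecksumKey) is an assumption here, as the diff only references the constant.

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/chunked/toc"
    )

    func main() {
        // eStargz TOC digest annotation, as defined above.
        annotations := map[string]string{
            "containerd.io/snapshot/stargz/toc.digest": "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
        }
        d, err := toc.GetTOCDigest(annotations)
        fmt.Println(d, err) // the parsed digest, <nil>

        // Assumed literal value of internal.ManifestChecksumKey; with both
        // keys present, GetTOCDigest now returns an error.
        annotations["io.github.containers.zstd-chunked.manifest-checksum"] = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
        _, err = toc.GetTOCDigest(annotations)
        fmt.Println(err) // both zstd:chunked and eStargz TOC found
    }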
45 vendor/github.com/containers/storage/pkg/fsverity/fsverity_linux.go generated vendored Normal file
@ -0,0 +1,45 @@
package fsverity

import (
    "errors"
    "fmt"
    "syscall"
    "unsafe"

    "golang.org/x/sys/unix"
)

// verityDigest struct represents the digest used for verifying the integrity of a file.
type verityDigest struct {
    Fsv unix.FsverityDigest
    Buf [64]byte
}

// EnableVerity enables the verity feature on a file represented by the file descriptor 'fd'. The file must be opened
// in read-only mode.
// The 'description' parameter is a human-readable description of the file.
func EnableVerity(description string, fd int) error {
    enableArg := unix.FsverityEnableArg{
        Version: 1,
        Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256,
        Block_size: 4096,
    }

    _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg)))
    if e1 != 0 && !errors.Is(e1, unix.EEXIST) {
        return fmt.Errorf("failed to enable verity for %q: %w", description, e1)
    }
    return nil
}

// MeasureVerity measures and returns the verity digest for the file represented by 'fd'.
// The 'description' parameter is a human-readable description of the file.
func MeasureVerity(description string, fd int) (string, error) {
    var digest verityDigest
    digest.Fsv.Size = 64
    _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest)))
    if e1 != 0 {
        return "", fmt.Errorf("failed to measure verity for %q: %w", description, e1)
    }
    return fmt.Sprintf("%x", digest.Buf[:digest.Fsv.Size]), nil
}
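A short usage sketch for the new fsverity helpers: open the file read-only, enable verity, then measure it. Requires a filesystem with fs-verity enabled (e.g. ext4 with the verity feature) and no remaining writable descriptors to the file; the path is hypothetical.

    package main

    import (
        "fmt"
        "os"

        "github.com/containers/storage/pkg/fsverity"
    )

    func main() {
        // Hypothetical target file on an fs-verity-capable filesystem.
        f, err := os.Open("/var/lib/containers/storage/overlay/blob")
        if err != nil {
            panic(err)
        }
        defer f.Close()

        if err := fsverity.EnableVerity(f.Name(), int(f.Fd())); err != nil {
            panic(err) // e.g. ENOTSUP if the filesystem lacks fs-verity
        }
        digest, err := fsverity.MeasureVerity(f.Name(), int(f.Fd()))
        if err != nil {
            panic(err)
        }
        fmt.Println("fs-verity digest:", digest)
    }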
21 vendor/github.com/containers/storage/pkg/fsverity/fsverity_unsupported.go generated vendored Normal file
@ -0,0 +1,21 @@
//go:build !linux
// +build !linux

package fsverity

import (
    "fmt"
)

// EnableVerity enables the verity feature on a file represented by the file descriptor 'fd'. The file must be opened
// in read-only mode.
// The 'description' parameter is a human-readable description of the file.
func EnableVerity(description string, fd int) error {
    return fmt.Errorf("fs-verity is not supported on this platform")
}

// MeasureVerity measures and returns the verity digest for the file represented by 'fd'.
// The 'description' parameter is a human-readable description of the file.
func MeasureVerity(description string, fd int) (string, error) {
    return "", fmt.Errorf("fs-verity is not supported on this platform")
}
4 vendor/github.com/containers/storage/pkg/homedir/homedir_others.go generated vendored
@ -1,5 +1,5 @@
//go:build !linux && !darwin && !freebsd
// +build !linux,!darwin,!freebsd
//go:build !linux && !darwin && !freebsd && !windows
// +build !linux,!darwin,!freebsd,!windows

package homedir

29 vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go generated vendored
@ -5,6 +5,7 @@ package homedir

import (
    "os"
    "path/filepath"
)

// Key returns the env var name for the user's home dir based on
@ -25,8 +26,36 @@ func Get() string {
    return home
}

// GetConfigHome returns the home directory of the current user with the help of
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
func GetConfigHome() (string, error) {
    return filepath.Join(Get(), ".config"), nil
}

// GetShortcutString returns the string that is shortcut to user's home directory
// in the native shell of the platform running on.
func GetShortcutString() string {
    return "%USERPROFILE%" // be careful while using in format functions
}

// StickRuntimeDirContents is a no-op on Windows
func StickRuntimeDirContents(files []string) ([]string, error) {
    return nil, nil
}

// GetRuntimeDir returns a directory suitable to store runtime files.
// The function will try to use the XDG_RUNTIME_DIR env variable if it is set.
// XDG_RUNTIME_DIR is typically configured via pam_systemd.
// If XDG_RUNTIME_DIR is not set, GetRuntimeDir will try to find a suitable
// directory for the current user.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetRuntimeDir() (string, error) {
    data, err := GetDataHome()
    if err != nil {
        return "", err
    }
    runtimeDir := filepath.Join(data, "containers", "storage")
    return runtimeDir, nil
}
2 vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go generated vendored
@ -14,7 +14,7 @@ import (
    "syscall"

    "github.com/containers/storage/pkg/system"
    "github.com/opencontainers/runc/libcontainer/user"
    "github.com/moby/sys/user"
)

var (
9 vendor/github.com/containers/storage/storage.conf generated vendored
@ -59,7 +59,7 @@ additionalimagestores = [
# can deduplicate pulling of content, disk storage of content and can allow the
# kernel to use less memory when running containers.

# containers/storage supports three keys
# containers/storage supports four keys
# * enable_partial_images="true" | "false"
#   Tells containers/storage to look for files previously pulled in storage
#   rather than always pulling them from the container registry.
@ -70,7 +70,12 @@ additionalimagestores = [
#   Tells containers/storage where an ostree repository exists that might have
#   previously pulled content which can be used when attempting to avoid
#   pulling content from the container registry
pull_options = {enable_partial_images = "false", use_hard_links = "false", ostree_repos=""}
# * convert_images = "false" | "true"
#   If set to true, containers/storage will convert images to a
#   format compatible with partial pulls in order to take advantage
#   of local deduplication and hard linking. It is an expensive
#   operation so it is not enabled by default.
pull_options = {enable_partial_images = "true", use_hard_links = "false", ostree_repos=""}

# Remap-UIDs/GIDs is the mapping from UIDs/GIDs as they should appear inside of
# a container, to the UIDs/GIDs as they should appear outside of the container,
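For reference, a pull_options line exercising all four documented keys at once would look like this (illustrative values, not the shipped default):

    pull_options = {enable_partial_images = "true", use_hard_links = "true", ostree_repos = "", convert_images = "true"}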
172 vendor/github.com/containers/storage/store.go generated vendored
@ -71,6 +71,19 @@ type metadataStore interface {
	rwMetadataStore
}

+// ApplyStagedLayerOptions contains options to pass to ApplyStagedLayer
+type ApplyStagedLayerOptions struct {
+	ID           string        // Mandatory
+	ParentLayer  string        // Optional
+	Names        []string      // Optional
+	MountLabel   string        // Optional
+	Writeable    bool          // Optional
+	LayerOptions *LayerOptions // Optional
+
+	DiffOutput  *drivers.DriverWithDifferOutput  // Mandatory
+	DiffOptions *drivers.ApplyDiffWithDifferOpts // Mandatory
+}
+
// An roBigDataStore wraps up the read-only big-data related methods of the
// various types of file-based lookaside stores that we implement.
type roBigDataStore interface {
@ -318,11 +331,21 @@ type Store interface {
	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)

	// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
+	// Deprecated: it will be removed soon. Use ApplyStagedLayer instead.
	ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error

	// CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors
+	// Deprecated: it will be removed soon. Use CleanupStagedLayer instead.
	CleanupStagingDirectory(stagingDirectory string) error

+	// ApplyStagedLayer combines the functions of CreateLayer and ApplyDiffFromStagingDirectory,
+	// marking the layer for automatic removal if applying the diff fails
+	// for any reason.
+	ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error)
+
+	// CleanupStagedLayer cleanups the staging directory. It can be used to cleanup the staging directory on errors
+	CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error
+
	// DifferTarget gets the path to the differ target.
	DifferTarget(id string) (string, error)
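A sketch of how a caller might migrate from the deprecated pair to the new API. This is an illustration only: the `graphdriver` alias for `github.com/containers/storage/drivers` and the empty `to` staging convention are assumptions, not part of this diff.

```go
package example

import (
	storage "github.com/containers/storage"
	graphdriver "github.com/containers/storage/drivers" // assumed alias
)

// applyStaged stages a diff and turns it into a layer with the new API.
func applyStaged(store storage.Store, differ graphdriver.Differ, layerID string) (*storage.Layer, error) {
	// Stage the diff without applying it to an existing layer (assumed convention).
	out, err := store.ApplyDiffWithDiffer("", &graphdriver.ApplyDiffWithDifferOpts{}, differ)
	if err != nil {
		return nil, err
	}
	// Old flow: store.ApplyDiffFromStagingDirectory(layerID, out.Target, out, nil), now deprecated.
	// New flow: create-and-apply in one call; per the interface comment, the layer
	// is removed automatically if applying the diff fails.
	layer, err := store.ApplyStagedLayer(storage.ApplyStagedLayerOptions{
		ID:          layerID, // hypothetical layer ID
		DiffOutput:  out,
		DiffOptions: &graphdriver.ApplyDiffWithDifferOpts{},
	})
	if err != nil {
		_ = store.CleanupStagedLayer(out) // release the staging directory
		return nil, err
	}
	return layer, nil
}
```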
@ -397,6 +420,18 @@ type Store interface {
	// allow ImagesByDigest to find images by their correct digests.
	SetImageBigData(id, key string, data []byte, digestManifest func([]byte) (digest.Digest, error)) error

+	// ImageDirectory returns a path of a directory which the caller can
+	// use to store data, specific to the image, which the library does not
+	// directly manage. The directory will be deleted when the image is
+	// deleted.
+	ImageDirectory(id string) (string, error)
+
+	// ImageRunDirectory returns a path of a directory which the caller can
+	// use to store data, specific to the image, which the library does not
+	// directly manage. The directory will be deleted when the host system
+	// is restarted.
+	ImageRunDirectory(id string) (string, error)
+
	// ListLayerBigData retrieves a list of the (possibly large) chunks of
	// named data associated with a layer.
	ListLayerBigData(id string) ([]string, error)
@ -568,10 +603,19 @@ type LayerOptions struct {
	// initialize this layer. If set, it should be a child of the layer
	// which we want to use as the parent of the new layer.
	TemplateLayer string
-	// OriginalDigest specifies a digest of the tarstream (diff), if one is
+	// OriginalDigest specifies a digest of the (possibly-compressed) tarstream (diff), if one is
	// provided along with these LayerOptions, and reliably known by the caller.
+	// The digest might not be exactly the digest of the provided tarstream
+	// (e.g. the digest might be of a compressed representation, while providing
+	// an uncompressed one); in that case the caller is responsible for the two matching.
+	// Use the default "" if this fields is not applicable or the value is not known.
	OriginalDigest digest.Digest
+	// OriginalSize specifies a size of the (possibly-compressed) tarstream corresponding
+	// to OriginalDigest.
+	// If the digest does not match the provided tarstream, OriginalSize must match OriginalDigest,
+	// not the tarstream.
+	// Use nil if not applicable or not known.
+	OriginalSize *int64
	// UncompressedDigest specifies a digest of the uncompressed version (“DiffID”)
	// of the tarstream (diff), if one is provided along with these LayerOptions,
	// and reliably known by the caller.
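For instance (a sketch with made-up values, not part of the diff): a caller that streams an uncompressed diff but only knows the registry's compressed digest pairs that digest with the compressed size.

```go
package example

import (
	storage "github.com/containers/storage"
	digest "github.com/opencontainers/go-digest"
)

// compressedLayerOptions is a sketch: the caller streams an uncompressed
// diff but records the registry's compressed digest and its size.
func compressedLayerOptions(compressedDigest digest.Digest, compressedSize int64) *storage.LayerOptions {
	return &storage.LayerOptions{
		OriginalDigest: compressedDigest, // digest of the compressed representation
		OriginalSize:   &compressedSize,  // must describe OriginalDigest, not the uncompressed stream
	}
}
```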
@ -928,11 +972,13 @@ func (s *store) load() error {
	if err := os.MkdirAll(gipath, 0o700); err != nil {
		return err
	}
-	ris, err := newImageStore(gipath)
+	imageStore, err := newImageStore(gipath)
	if err != nil {
		return err
	}
-	s.imageStore = ris
+	s.imageStore = imageStore
+
+	s.rwImageStores = []rwImageStore{imageStore}

	gcpath := filepath.Join(s.graphRoot, driverPrefix+"containers")
	if err := os.MkdirAll(gcpath, 0o700); err != nil {
@ -950,13 +996,16 @@ func (s *store) load() error {

	s.containerStore = rcs

-	for _, store := range driver.AdditionalImageStores() {
+	additionalImageStores := s.graphDriver.AdditionalImageStores()
+	if s.imageStoreDir != "" {
+		additionalImageStores = append([]string{s.graphRoot}, additionalImageStores...)
+	}
+
+	for _, store := range additionalImageStores {
		gipath := filepath.Join(store, driverPrefix+"images")
		var ris roImageStore
-		if s.imageStoreDir != "" && store == s.graphRoot {
-			// If --imagestore was set and current store
-			// is `graphRoot` then mount it as a `rw` additional
-			// store instead of `readonly` additional store.
+		// both the graphdriver and the imagestore must be used read-write.
+		if store == s.imageStoreDir || store == s.graphRoot {
			imageStore, err := newImageStore(gipath)
			if err != nil {
				return err
@ -1041,15 +1090,9 @@ func (s *store) stopUsingGraphDriver() {
// Almost all users should use startUsingGraphDriver instead.
// The caller must hold s.graphLock.
func (s *store) createGraphDriverLocked() (drivers.Driver, error) {
-	driverRoot := s.imageStoreDir
-	imageStoreBase := s.graphRoot
-	if driverRoot == "" {
-		driverRoot = s.graphRoot
-		imageStoreBase = ""
-	}
	config := drivers.Options{
-		Root:           driverRoot,
-		ImageStore:     imageStoreBase,
+		Root:           s.graphRoot,
+		ImageStore:     s.imageStoreDir,
		RunRoot:        s.runRoot,
		DriverPriority: s.graphDriverPriority,
		DriverOptions:  s.graphOptions,
@ -1079,15 +1122,15 @@ func (s *store) getLayerStoreLocked() (rwLayerStore, error) {
	if err := os.MkdirAll(rlpath, 0o700); err != nil {
		return nil, err
	}
-	imgStoreRoot := s.imageStoreDir
-	if imgStoreRoot == "" {
-		imgStoreRoot = s.graphRoot
-	}
-	glpath := filepath.Join(imgStoreRoot, driverPrefix+"layers")
+	glpath := filepath.Join(s.graphRoot, driverPrefix+"layers")
	if err := os.MkdirAll(glpath, 0o700); err != nil {
		return nil, err
	}
-	rls, err := s.newLayerStore(rlpath, glpath, s.graphDriver, s.transientStore)
+	ilpath := ""
+	if s.imageStoreDir != "" {
+		ilpath = filepath.Join(s.imageStoreDir, driverPrefix+"layers")
+	}
+	rls, err := s.newLayerStore(rlpath, glpath, ilpath, s.graphDriver, s.transientStore)
	if err != nil {
		return nil, err
	}
@ -1118,8 +1161,10 @@ func (s *store) getROLayerStoresLocked() ([]roLayerStore, error) {
	if err := os.MkdirAll(rlpath, 0o700); err != nil {
		return nil, err
	}
+
	for _, store := range s.graphDriver.AdditionalImageStores() {
		glpath := filepath.Join(store, driverPrefix+"layers")
+
		rls, err := newROLayerStore(rlpath, glpath, s.graphDriver)
		if err != nil {
			return nil, err
@ -1400,8 +1445,7 @@ func (s *store) canUseShifting(uidmap, gidmap []idtools.IDMap) bool {
	return true
}

-func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) {
-	var parentLayer *Layer
+func (s *store) putLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader, slo *stagedLayerOptions) (*Layer, int64, error) {
	rlstore, rlstores, err := s.bothLayerStoreKinds()
	if err != nil {
		return nil, -1, err
@ -1414,6 +1458,8 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
		return nil, -1, err
	}
	defer s.containerStore.stopWriting()
+
+	var parentLayer *Layer
	var options LayerOptions
	if lOptions != nil {
		options = *lOptions
@ -1473,6 +1519,7 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
	}
	layerOptions := LayerOptions{
		OriginalDigest:     options.OriginalDigest,
+		OriginalSize:       options.OriginalSize,
		UncompressedDigest: options.UncompressedDigest,
		Flags:              options.Flags,
	}
@ -1486,7 +1533,11 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w
			GIDMap: copyIDMap(gidMap),
		}
	}
-	return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff)
+	return rlstore.create(id, parentLayer, names, mountLabel, nil, &layerOptions, writeable, diff, slo)
}

+func (s *store) PutLayer(id, parent string, names []string, mountLabel string, writeable bool, lOptions *LayerOptions, diff io.Reader) (*Layer, int64, error) {
+	return s.putLayer(id, parent, names, mountLabel, writeable, lOptions, diff, nil)
+}
+
func (s *store) CreateLayer(id, parent string, names []string, mountLabel string, writeable bool, options *LayerOptions) (*Layer, error) {
@ -1696,7 +1747,7 @@ func (s *store) imageTopLayerForMapping(image *Image, ristore roImageStore, rlst
		}
	}
	layerOptions.TemplateLayer = layer.ID
-	mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil)
+	mappedLayer, _, err := rlstore.create("", parentLayer, nil, layer.MountLabel, nil, &layerOptions, false, nil, nil)
	if err != nil {
		return nil, fmt.Errorf("creating an ID-mapped copy of layer %q: %w", layer.ID, err)
	}
@ -1867,7 +1918,7 @@ func (s *store) CreateContainer(id string, names []string, image, layer, metadat
		options.Flags[mountLabelFlag] = mountLabel
	}

-	clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil)
+	clayer, _, err := rlstore.create(layer, imageTopLayer, nil, mlabel, options.StorageOpt, layerOptions, true, nil, nil)
	if err != nil {
		return nil, err
	}
@ -2540,7 +2591,7 @@ func (s *store) DeleteImage(id string, commit bool) (layers []string, err error)
	if err := s.writeToAllStores(func(rlstore rwLayerStore) error {
		// Delete image from all available imagestores configured to be used.
		imageFound := false
-		for _, is := range append([]rwImageStore{s.imageStore}, s.rwImageStores...) {
+		for _, is := range s.rwImageStores {
			if is != s.imageStore {
				// This is an additional writeable image store
				// so we must perform lock
@ -2932,15 +2983,28 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro
}

func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error {
+	if stagingDirectory != diffOutput.Target {
+		return fmt.Errorf("invalid value for staging directory, it must be the same as the differ target directory")
+	}
	_, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
		if !rlstore.Exists(to) {
			return struct{}{}, ErrLayerUnknown
		}
-		return struct{}{}, rlstore.ApplyDiffFromStagingDirectory(to, stagingDirectory, diffOutput, options)
+		return struct{}{}, rlstore.applyDiffFromStagingDirectory(to, diffOutput, options)
	})
	return err
}

+func (s *store) ApplyStagedLayer(args ApplyStagedLayerOptions) (*Layer, error) {
+	slo := stagedLayerOptions{
+		DiffOutput:  args.DiffOutput,
+		DiffOptions: args.DiffOptions,
+	}
+
+	layer, _, err := s.putLayer(args.ID, args.ParentLayer, args.Names, args.MountLabel, args.Writeable, args.LayerOptions, nil, &slo)
+	return layer, err
+}
+
func (s *store) CleanupStagingDirectory(stagingDirectory string) error {
	_, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
		return struct{}{}, rlstore.CleanupStagingDirectory(stagingDirectory)
@ -2948,6 +3012,13 @@ func (s *store) CleanupStagingDirectory(stagingDirectory string) error {
	return err
}

+func (s *store) CleanupStagedLayer(diffOutput *drivers.DriverWithDifferOutput) error {
+	_, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
+		return struct{}{}, rlstore.CleanupStagingDirectory(diffOutput.Target)
+	})
+	return err
+}
+
func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
	return writeToLayerStore(s, func(rlstore rwLayerStore) (*drivers.DriverWithDifferOutput, error) {
		if to != "" && !rlstore.Exists(to) {
@ -3311,6 +3382,27 @@ func (s *store) ContainerByLayer(id string) (*Container, error) {
	return nil, ErrContainerUnknown
}

+func (s *store) ImageDirectory(id string) (string, error) {
+	foundImage := false
+	if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) {
+		if store.Exists(id) {
+			foundImage = true
+		}
+		middleDir := s.graphDriverName + "-images"
+		gipath := filepath.Join(s.GraphRoot(), middleDir, id, "userdata")
+		if err := os.MkdirAll(gipath, 0o700); err != nil {
+			return "", true, err
+		}
+		return gipath, true, nil
+	}); done {
+		return res, err
+	}
+	if foundImage {
+		return "", fmt.Errorf("locating image with ID %q (consider removing the image to resolve the issue): %w", id, os.ErrNotExist)
+	}
+	return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
+}
+
func (s *store) ContainerDirectory(id string) (string, error) {
	res, _, err := readContainerStore(s, func() (string, bool, error) {
		id, err := s.containerStore.Lookup(id)
@ -3328,6 +3420,28 @@ func (s *store) ContainerDirectory(id string) (string, error) {
	return res, err
}

+func (s *store) ImageRunDirectory(id string) (string, error) {
+	foundImage := false
+	if res, done, err := readAllImageStores(s, func(store roImageStore) (string, bool, error) {
+		if store.Exists(id) {
+			foundImage = true
+		}
+
+		middleDir := s.graphDriverName + "-images"
+		rcpath := filepath.Join(s.RunRoot(), middleDir, id, "userdata")
+		if err := os.MkdirAll(rcpath, 0o700); err != nil {
+			return "", true, err
+		}
+		return rcpath, true, nil
+	}); done {
+		return res, err
+	}
+	if foundImage {
+		return "", fmt.Errorf("locating image with ID %q (consider removing the image to resolve the issue): %w", id, os.ErrNotExist)
+	}
+	return "", fmt.Errorf("locating image with ID %q: %w", id, ErrImageUnknown)
+}
+
func (s *store) ContainerRunDirectory(id string) (string, error) {
	res, _, err := readContainerStore(s, func() (string, bool, error) {
		id, err := s.containerStore.Lookup(id)
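The difference between the two new helpers is only lifetime. A sketch (image ID and file names are made up):

```go
package example

import (
	"os"
	"path/filepath"

	storage "github.com/containers/storage"
)

// stashImageData illustrates the two directory lifetimes.
func stashImageData(store storage.Store, imageID string, data []byte) error {
	// Survives reboots; removed together with the image.
	dir, err := store.ImageDirectory(imageID)
	if err != nil {
		return err
	}
	if err := os.WriteFile(filepath.Join(dir, "notes.json"), data, 0o600); err != nil {
		return err
	}

	// Cleared when the host restarts.
	runDir, err := store.ImageRunDirectory(imageID)
	if err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(runDir, "session.lock"), nil, 0o600)
}
```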
4 vendor/github.com/containers/storage/userns.go generated vendored
@ -11,7 +11,7 @@ import (
	"github.com/containers/storage/pkg/idtools"
	"github.com/containers/storage/pkg/unshare"
	"github.com/containers/storage/types"
-	libcontainerUser "github.com/opencontainers/runc/libcontainer/user"
+	libcontainerUser "github.com/moby/sys/user"
	"github.com/sirupsen/logrus"
)

@ -175,7 +175,7 @@ outer:

	// We need to create a temporary layer so we can mount it and lookup the
	// maximum IDs used.
-	clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil)
+	clayer, _, err := rlstore.create("", topLayer, nil, "", nil, layerOptions, false, nil, nil)
	if err != nil {
		return 0, err
	}
10 vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md generated vendored
@ -1,10 +0,0 @@
-Serious about security
-======================
-
-Square recognizes the important contributions the security research community
-can make. We therefore encourage reporting security issues with the code
-contained in this repository.
-
-If you believe you have discovered a security vulnerability, please follow the
-guidelines at <https://bugcrowd.com/squareopensource>.
-
19 vendor/github.com/go-jose/go-jose/v3/CHANGELOG.md generated vendored
@ -1,6 +1,23 @@
+# v3.0.2
+
+## Fixed
+
+ - DecryptMulti: handle decompression error (#19)
+
+## Changed
+
+ - jwe/CompactSerialize: improve performance (#67)
+ - Increase the default number of PBKDF2 iterations to 600k (#48)
+ - Return the proper algorithm for ECDSA keys (#45)
+
+## Added
+
+ - Add Thumbprint support for opaque signers (#38)
+
# v3.0.1

-Fixed:
+## Fixed

 - Security issue: an attacker specifying a large "p2c" value can cause
   JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large
   amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the
53 vendor/github.com/go-jose/go-jose/v3/README.md generated vendored
@ -1,15 +1,18 @@
# Go JOSE

-[](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
-[](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
-[](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
-[](https://travis-ci.org/go-jose/go-jose)
-[](https://coveralls.io/r/go-jose/go-jose)
+[](https://pkg.go.dev/github.com/go-jose/go-jose/v3)
+[](https://pkg.go.dev/github.com/go-jose/go-jose/v3/jwt)
+[](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
+[](https://github.com/go-jose/go-jose/actions)

Package jose aims to provide an implementation of the Javascript Object Signing
and Encryption set of standards. This includes support for JSON Web Encryption,
JSON Web Signature, and JSON Web Token standards.

+**Help Wanted!** If you'd like to help us develop this library please reach
+out to css (at) css.bio. While I'm still working on keeping this maintained,
+I have limited time for in-depth development and could use some additional help.
+
**Disclaimer**: This library contains encryption software that is subject to
the U.S. Export Administration Regulations. You may not export, re-export,
transfer or download this code or any part of it in violation of any United
@ -21,13 +24,13 @@ US maintained blocked list.
## Overview

The implementation follows the
-[JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516),
-[JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
-[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
+[JSON Web Encryption](https://dx.doi.org/10.17487/RFC7516) (RFC 7516),
+[JSON Web Signature](https://dx.doi.org/10.17487/RFC7515) (RFC 7515), and
+[JSON Web Token](https://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications.
Tables of supported algorithms are shown below. The library supports both
the compact and JWS/JWE JSON Serialization formats, and has optional support for
multiple recipients. It also comes with a small command-line utility
-([`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util))
+([`jose-util`](https://pkg.go.dev/github.com/go-jose/go-jose/jose-util))
for dealing with JOSE messages in a shell.

**Note**: We use a forked version of the `encoding/json` package from the Go
@ -38,29 +41,19 @@ libraries in other languages.

### Versions

-[Version 2](https://gopkg.in/go-jose/go-jose.v2)
-([branch](https://github.com/go-jose/go-jose/tree/v2),
-[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current stable version:
-
-    import "gopkg.in/go-jose/go-jose.v2"
-
[Version 3](https://github.com/go-jose/go-jose)
-([branch](https://github.com/go-jose/go-jose/tree/master),
-[doc](https://godoc.org/github.com/go-jose/go-jose)) is the under development/unstable version (not released yet):
+([branch](https://github.com/go-jose/go-jose/tree/v3),
+[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v3), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version:

    import "github.com/go-jose/go-jose/v3"

-All new feature development takes place on the `master` branch, which we are
-preparing to release as version 3 soon. Version 2 will continue to receive
-critical bug and security fixes. Note that starting with version 3 we are
-using Go modules for versioning instead of `gopkg.in` as before. Version 3 also will require Go version 1.13 or higher.
-
-Version 1 (on the `v1` branch) is frozen and not supported anymore.
+The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which
+are still useable but not actively developed anymore.

### Supported algorithms

See below for a table of supported algorithms. Algorithm identifiers match
-the names in the [JSON Web Algorithms](http://dx.doi.org/10.17487/RFC7518)
+the names in the [JSON Web Algorithms](https://dx.doi.org/10.17487/RFC7518)
standard where possible. The Godoc reference has a list of constants.

Key encryption | Algorithm identifier(s)
@ -103,20 +96,20 @@ allows attaching a key id.

Algorithm(s) | Corresponding types
:------------------------- | -------------------------------
-RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey)
-ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey)
-EdDSA<sup>1</sup> | [ed25519.PublicKey](https://godoc.org/pkg/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/pkg/crypto/ed25519#PrivateKey)
+RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey)
+ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey)
+EdDSA<sup>1</sup> | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey)
AES, HMAC | []byte

<sup>1. Only available in version 2 or later of the package</sup>

## Examples

-[](https://godoc.org/gopkg.in/go-jose/go-jose.v2)
-[](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt)
+[](https://pkg.go.dev/github.com/go-jose/go-jose/v3)
+[](https://pkg.go.dev/github.com/go-jose/go-jose/v3/jwt)

Examples can be found in the Godoc
reference for this package. The
-[`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util)
+[`jose-util`](https://github.com/go-jose/go-jose/tree/v3/jose-util)
subdirectory also contains a small command-line utility which might be useful
as an example as well.
13 vendor/github.com/go-jose/go-jose/v3/SECURITY.md generated vendored Normal file
@ -0,0 +1,13 @@
+# Security Policy
+This document explains how to contact the Let's Encrypt security team to report security vulnerabilities.
+
+## Supported Versions
+| Version | Supported |
+| ------- | ----------|
+| >= v3   | ✓         |
+| v2      | ✗         |
+| v1      | ✗         |
+
+## Reporting a vulnerability
+
+Please see [https://letsencrypt.org/contact/#security](https://letsencrypt.org/contact/#security) for the email address to report a vulnerability. Ensure that the subject line for your report contains the word `vulnerability` and is descriptive. Your email should be acknowledged within 24 hours. If you do not receive a response within 24 hours, please follow-up again with another email.
3 vendor/github.com/go-jose/go-jose/v3/asymmetric.go generated vendored
@ -285,6 +285,9 @@ func (ctx rsaDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm

	switch alg {
	case RS256, RS384, RS512:
+		// TODO(https://github.com/go-jose/go-jose/issues/40): As of go1.20, the
+		// random parameter is legacy and ignored, and it can be nil.
+		// https://cs.opensource.google/go/go/+/refs/tags/go1.20:src/crypto/rsa/pkcs1v15.go;l=263;bpv=0;bpt=1
		out, err = rsa.SignPKCS1v15(RandReader, ctx.privateKey, hash, hashed)
	case PS256, PS384, PS512:
		out, err = rsa.SignPSS(RandReader, ctx.privateKey, hash, hashed, &rsa.PSSOptions{
93 vendor/github.com/go-jose/go-jose/v3/crypter.go generated vendored
@ -21,7 +21,6 @@ import (
	"crypto/rsa"
	"errors"
	"fmt"
-	"reflect"

	"github.com/go-jose/go-jose/v3/json"
)
@ -76,14 +75,24 @@ type recipientKeyInfo struct {
type EncrypterOptions struct {
	Compression CompressionAlgorithm

-	// Optional map of additional keys to be inserted into the protected header
-	// of a JWS object. Some specifications which make use of JWS like to insert
-	// additional values here. All values must be JSON-serializable.
+	// Optional map of name/value pairs to be inserted into the protected
+	// header of a JWS object. Some specifications which make use of
+	// JWS require additional values here.
+	//
+	// Values will be serialized by [json.Marshal] and must be valid inputs to
+	// that function.
+	//
+	// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
	ExtraHeaders map[HeaderKey]interface{}
}

// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
-// if necessary. It returns itself and so can be used in a fluent style.
+// if necessary, and returns the updated EncrypterOptions.
+//
+// The v parameter will be serialized by [json.Marshal] and must be a valid
+// input to that function.
+//
+// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
func (eo *EncrypterOptions) WithHeader(k HeaderKey, v interface{}) *EncrypterOptions {
	if eo.ExtraHeaders == nil {
		eo.ExtraHeaders = map[HeaderKey]interface{}{}
@ -111,7 +120,17 @@ func (eo *EncrypterOptions) WithType(typ ContentType) *EncrypterOptions {
// default of 100000 will be used for the count and a 128-bit random salt will
// be generated.
type Recipient struct {
-	Algorithm KeyAlgorithm
+	Algorithm KeyAlgorithm
+	// Key must have one of these types:
+	//  - ed25519.PublicKey
+	//  - *ecdsa.PublicKey
+	//  - *rsa.PublicKey
+	//  - *JSONWebKey
+	//  - JSONWebKey
+	//  - []byte (a symmetric key)
+	//  - Any type that satisfies the OpaqueKeyEncrypter interface
+	//
+	// The type of Key must match the value of Algorithm.
	Key        interface{}
	KeyID      string
	PBES2Count int
@ -150,16 +169,17 @@ func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions)
	switch rcpt.Algorithm {
	case DIRECT:
		// Direct encryption mode must be treated differently
-		if reflect.TypeOf(rawKey) != reflect.TypeOf([]byte{}) {
+		keyBytes, ok := rawKey.([]byte)
+		if !ok {
			return nil, ErrUnsupportedKeyType
		}
-		if encrypter.cipher.keySize() != len(rawKey.([]byte)) {
+		if encrypter.cipher.keySize() != len(keyBytes) {
			return nil, ErrInvalidKeySize
		}
		encrypter.keyGenerator = staticKeyGenerator{
-			key: rawKey.([]byte),
+			key: keyBytes,
		}
-		recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, rawKey.([]byte))
+		recipientInfo, _ := newSymmetricRecipient(rcpt.Algorithm, keyBytes)
		recipientInfo.keyID = keyID
		if rcpt.KeyID != "" {
			recipientInfo.keyID = rcpt.KeyID
@ -168,16 +188,16 @@ func NewEncrypter(enc ContentEncryption, rcpt Recipient, opts *EncrypterOptions)
		return encrypter, nil
	case ECDH_ES:
		// ECDH-ES (w/o key wrapping) is similar to DIRECT mode
-		typeOf := reflect.TypeOf(rawKey)
-		if typeOf != reflect.TypeOf(&ecdsa.PublicKey{}) {
+		keyDSA, ok := rawKey.(*ecdsa.PublicKey)
+		if !ok {
			return nil, ErrUnsupportedKeyType
		}
		encrypter.keyGenerator = ecKeyGenerator{
			size:      encrypter.cipher.keySize(),
			algID:     string(enc),
-			publicKey: rawKey.(*ecdsa.PublicKey),
+			publicKey: keyDSA,
		}
-		recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, rawKey.(*ecdsa.PublicKey))
+		recipientInfo, _ := newECDHRecipient(rcpt.Algorithm, keyDSA)
		recipientInfo.keyID = keyID
		if rcpt.KeyID != "" {
			recipientInfo.keyID = rcpt.KeyID
@ -270,9 +290,8 @@ func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKey
		recipient, err := makeJWERecipient(alg, encryptionKey.Key)
		recipient.keyID = encryptionKey.KeyID
		return recipient, err
-	}
-	if encrypter, ok := encryptionKey.(OpaqueKeyEncrypter); ok {
-		return newOpaqueKeyEncrypter(alg, encrypter)
+	case OpaqueKeyEncrypter:
+		return newOpaqueKeyEncrypter(alg, encryptionKey)
	}
	return recipientKeyInfo{}, ErrUnsupportedKeyType
}
@ -300,11 +319,11 @@ func newDecrypter(decryptionKey interface{}) (keyDecrypter, error) {
		return newDecrypter(decryptionKey.Key)
	case *JSONWebKey:
		return newDecrypter(decryptionKey.Key)
+	case OpaqueKeyDecrypter:
+		return &opaqueKeyDecrypter{decrypter: decryptionKey}, nil
+	default:
+		return nil, ErrUnsupportedKeyType
	}
-	if okd, ok := decryptionKey.(OpaqueKeyDecrypter); ok {
-		return &opaqueKeyDecrypter{decrypter: okd}, nil
-	}
-	return nil, ErrUnsupportedKeyType
}

// Implementation of encrypt method producing a JWE object.
@ -403,9 +422,24 @@ func (ctx *genericEncrypter) Options() EncrypterOptions {
	}
}

-// Decrypt and validate the object and return the plaintext. Note that this
-// function does not support multi-recipient, if you desire multi-recipient
+// Decrypt and validate the object and return the plaintext. This
+// function does not support multi-recipient. If you desire multi-recipient
// decryption use DecryptMulti instead.
+//
+// The decryptionKey argument must contain a private or symmetric key
+// and must have one of these types:
+//  - *ecdsa.PrivateKey
+//  - *rsa.PrivateKey
+//  - *JSONWebKey
+//  - JSONWebKey
+//  - *JSONWebKeySet
+//  - JSONWebKeySet
+//  - []byte (a symmetric key)
+//  - string (a symmetric key)
+//  - Any type that satisfies the OpaqueKeyDecrypter interface.
+//
+// Note that ed25519 is only available for signatures, not encryption, so is
+// not an option here.
func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) {
	headers := obj.mergedHeaders(nil)
@ -462,15 +496,21 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error)
	// The "zip" header parameter may only be present in the protected header.
	if comp := obj.protected.getCompression(); comp != "" {
		plaintext, err = decompress(comp, plaintext)
+		if err != nil {
+			return nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err)
+		}
	}

-	return plaintext, err
+	return plaintext, nil
}

// DecryptMulti decrypts and validates the object and returns the plaintexts,
// with support for multiple recipients. It returns the index of the recipient
// for which the decryption was successful, the merged headers for that recipient,
// and the plaintext.
+//
+// The decryptionKey argument must have one of the types allowed for the
+// decryptionKey argument of Decrypt().
func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) {
	globalHeaders := obj.mergedHeaders(nil)
@ -532,7 +572,10 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade

	// The "zip" header parameter may only be present in the protected header.
	if comp := obj.protected.getCompression(); comp != "" {
-		plaintext, _ = decompress(comp, plaintext)
+		plaintext, err = decompress(comp, plaintext)
+		if err != nil {
+			return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to decompress plaintext: %v", err)
+		}
	}

	sanitized, err := headers.sanitized()
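A minimal round trip against the documented Decrypt key types (a sketch, not from the diff; error handling is deliberately blunt):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	enc, err := jose.NewEncrypter(
		jose.A128GCM,
		jose.Recipient{Algorithm: jose.RSA_OAEP, Key: &priv.PublicKey},
		nil,
	)
	if err != nil {
		panic(err)
	}
	obj, err := enc.Encrypt([]byte("hello"))
	if err != nil {
		panic(err)
	}

	serialized, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}
	parsed, err := jose.ParseEncrypted(serialized)
	if err != nil {
		panic(err)
	}

	// decryptionKey is an *rsa.PrivateKey, one of the documented types.
	plaintext, err := parsed.Decrypt(priv)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", plaintext)
}
```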
2 vendor/github.com/go-jose/go-jose/v3/doc.go generated vendored
@ -15,13 +15,11 @@
*/

/*
-
Package jose aims to provide an implementation of the Javascript Object Signing
and Encryption set of standards. It implements encryption and signing based on
the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web
Token support available in a sub-package. The library supports both the compact
and JWS/JWE JSON Serialization formats, and has optional support for multiple
recipients.
-
*/
package jose
33 vendor/github.com/go-jose/go-jose/v3/encoding.go generated vendored
@ -189,3 +189,36 @@ func base64URLDecode(value string) ([]byte, error) {
	value = strings.TrimRight(value, "=")
	return base64.RawURLEncoding.DecodeString(value)
}
+
+func base64EncodeLen(sl []byte) int {
+	return base64.RawURLEncoding.EncodedLen(len(sl))
+}
+
+func base64JoinWithDots(inputs ...[]byte) string {
+	if len(inputs) == 0 {
+		return ""
+	}
+
+	// Count of dots.
+	totalCount := len(inputs) - 1
+
+	for _, input := range inputs {
+		totalCount += base64EncodeLen(input)
+	}
+
+	out := make([]byte, totalCount)
+	startEncode := 0
+	for i, input := range inputs {
+		base64.RawURLEncoding.Encode(out[startEncode:], input)
+
+		if i == len(inputs)-1 {
+			continue
+		}
+
+		startEncode += base64EncodeLen(input)
+		out[startEncode] = '.'
+		startEncode++
+	}
+
+	return string(out)
+}
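The new helper encodes each segment directly into one pre-sized buffer instead of allocating an intermediate string per segment. A standard-library-only sketch that should produce the same output (useful for reasoning about the behavior, not taken from the library):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// joinWithDots mirrors the documented output shape: each segment is
// raw-URL base64 encoded, with "." between segments.
func joinWithDots(inputs ...[]byte) string {
	parts := make([]string, 0, len(inputs))
	for _, in := range inputs {
		parts = append(parts, base64.RawURLEncoding.EncodeToString(in))
	}
	return strings.Join(parts, ".")
}

func main() {
	fmt.Println(joinWithDots([]byte(`{"alg":"RS256"}`), []byte("payload"), []byte{0x01, 0x02}))
}
```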
3 vendor/github.com/go-jose/go-jose/v3/json/decode.go generated vendored
@ -75,14 +75,13 @@ import (
//
// The JSON null value unmarshals into an interface, map, pointer, or slice
// by setting that Go value to nil. Because null is often used in JSON to mean
-// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// “not present,” unmarshaling a JSON null into any other Go type has no effect
// on the value and produces no error.
//
// When unmarshaling quoted strings, invalid UTF-8 or
// invalid UTF-16 surrogate pairs are not treated as an error.
// Instead, they are replaced by the Unicode replacement
// character U+FFFD.
-//
func Unmarshal(data []byte, v interface{}) error {
	// Check for well-formedness.
	// Avoids filling out half a data structure
28 vendor/github.com/go-jose/go-jose/v3/json/encode.go generated vendored
@ -58,6 +58,7 @@ import (
// becomes a member of the object unless
//   - the field's tag is "-", or
//   - the field is empty and its tag specifies the "omitempty" option.
+//
// The empty values are false, 0, any
// nil pointer or interface value, and any array, slice, map, or string of
// length zero. The object's default key string is the struct field name
@ -65,28 +66,28 @@ import (
// the struct field's tag value is the key name, followed by an optional comma
// and options. Examples:
//
-//   // Field is ignored by this package.
-//   Field int `json:"-"`
+//	// Field is ignored by this package.
+//	Field int `json:"-"`
//
-//   // Field appears in JSON as key "myName".
-//   Field int `json:"myName"`
+//	// Field appears in JSON as key "myName".
+//	Field int `json:"myName"`
//
-//   // Field appears in JSON as key "myName" and
-//   // the field is omitted from the object if its value is empty,
-//   // as defined above.
-//   Field int `json:"myName,omitempty"`
+//	// Field appears in JSON as key "myName" and
+//	// the field is omitted from the object if its value is empty,
+//	// as defined above.
+//	Field int `json:"myName,omitempty"`
//
-//   // Field appears in JSON as key "Field" (the default), but
-//   // the field is skipped if empty.
-//   // Note the leading comma.
-//   Field int `json:",omitempty"`
+//	// Field appears in JSON as key "Field" (the default), but
+//	// the field is skipped if empty.
+//	// Note the leading comma.
+//	Field int `json:",omitempty"`
//
// The "string" option signals that a field is stored as JSON inside a
// JSON-encoded string. It applies only to fields of string, floating point,
// integer, or boolean types. This extra level of encoding is sometimes used
// when communicating with JavaScript programs:
//
-//   Int64String int64 `json:",string"`
+//	Int64String int64 `json:",string"`
//
// The key name will be used if it's a non-empty string consisting of
// only Unicode letters, digits, dollar signs, percent signs, hyphens,
@ -133,7 +134,6 @@ import (
// JSON cannot represent cyclic data structures and Marshal does not
// handle them. Passing cyclic structures to Marshal will result in
// an infinite recursion.
-//
func Marshal(v interface{}) ([]byte, error) {
	e := &encodeState{}
	err := e.marshal(v)
1 vendor/github.com/go-jose/go-jose/v3/json/stream.go generated vendored
@ -240,7 +240,6 @@ var _ Unmarshaler = (*RawMessage)(nil)
//	Number, for JSON numbers
//	string, for JSON string literals
//	nil, for JSON null
-//
type Token interface{}

const (
14 vendor/github.com/go-jose/go-jose/v3/jwe.go generated vendored
@ -252,13 +252,13 @@ func (obj JSONWebEncryption) CompactSerialize() (string, error) {

	serializedProtected := mustSerializeJSON(obj.protected)

-	return fmt.Sprintf(
-		"%s.%s.%s.%s.%s",
-		base64.RawURLEncoding.EncodeToString(serializedProtected),
-		base64.RawURLEncoding.EncodeToString(obj.recipients[0].encryptedKey),
-		base64.RawURLEncoding.EncodeToString(obj.iv),
-		base64.RawURLEncoding.EncodeToString(obj.ciphertext),
-		base64.RawURLEncoding.EncodeToString(obj.tag)), nil
+	return base64JoinWithDots(
+		serializedProtected,
+		obj.recipients[0].encryptedKey,
+		obj.iv,
+		obj.ciphertext,
+		obj.tag,
+	), nil
}

// FullSerialize serializes an object using the full JSON serialization format.
18 vendor/github.com/go-jose/go-jose/v3/jwk.go generated vendored
@ -67,9 +67,21 @@ type rawJSONWebKey struct {
	X5tSHA256 string `json:"x5t#S256,omitempty"`
}

-// JSONWebKey represents a public or private key in JWK format.
+// JSONWebKey represents a public or private key in JWK format. It can be
+// marshaled into JSON and unmarshaled from JSON.
type JSONWebKey struct {
-	// Cryptographic key, can be a symmetric or asymmetric key.
+	// Key is the Go in-memory representation of this key. It must have one
+	// of these types:
+	//  - ed25519.PublicKey
+	//  - ed25519.PrivateKey
+	//  - *ecdsa.PublicKey
+	//  - *ecdsa.PrivateKey
+	//  - *rsa.PublicKey
+	//  - *rsa.PrivateKey
+	//  - []byte (a symmetric key)
+	//
+	// When marshaling this JSONWebKey into JSON, the "kty" header parameter
+	// will be automatically set based on the type of this field.
	Key interface{}
	// Key identifier, parsed from `kid` header.
	KeyID string
@ -389,6 +401,8 @@ func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) {
		input, err = rsaThumbprintInput(key.N, key.E)
	case ed25519.PrivateKey:
		input, err = edThumbprintInput(ed25519.PublicKey(key[32:]))
+	case OpaqueSigner:
+		return key.Public().Thumbprint(hash)
	default:
		return nil, fmt.Errorf("go-jose/go-jose: unknown key type '%s'", reflect.TypeOf(key))
	}
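A sketch of the documented marshaling behavior (key names are examples): the "kty" parameter is derived from the dynamic type of Key, so an *ecdsa.PublicKey yields "kty":"EC".

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"encoding/json"
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	// *ecdsa.PublicKey is one of the documented Key types.
	jwk := jose.JSONWebKey{Key: &priv.PublicKey, KeyID: "example-key"}
	out, err := json.Marshal(jwk)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```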
13 vendor/github.com/go-jose/go-jose/v3/jws.go generated vendored
@ -314,15 +314,18 @@ func (obj JSONWebSignature) compactSerialize(detached bool) (string, error) {
		return "", ErrNotSupported
	}

-	serializedProtected := base64.RawURLEncoding.EncodeToString(mustSerializeJSON(obj.Signatures[0].protected))
-	payload := ""
-	signature := base64.RawURLEncoding.EncodeToString(obj.Signatures[0].Signature)
+	serializedProtected := mustSerializeJSON(obj.Signatures[0].protected)

+	var payload []byte
	if !detached {
-		payload = base64.RawURLEncoding.EncodeToString(obj.payload)
+		payload = obj.payload
	}

-	return fmt.Sprintf("%s.%s.%s", serializedProtected, payload, signature), nil
+	return base64JoinWithDots(
+		serializedProtected,
+		payload,
+		obj.Signatures[0].Signature,
+	), nil
}

// CompactSerialize serializes an object using the compact serialization format.
2 vendor/github.com/go-jose/go-jose/v3/opaque.go generated vendored
@ -121,7 +121,7 @@ func (oke *opaqueKeyEncrypter) encryptKey(cek []byte, alg KeyAlgorithm) (recipie
	return oke.encrypter.encryptKey(cek, alg)
}

-//OpaqueKeyDecrypter is an interface that supports decrypting keys with an opaque key.
+// OpaqueKeyDecrypter is an interface that supports decrypting keys with an opaque key.
type OpaqueKeyDecrypter interface {
	DecryptKey(encryptedKey []byte, header Header) ([]byte, error)
}
9 vendor/github.com/go-jose/go-jose/v3/shared.go generated vendored
@ -183,8 +183,13 @@ type Header struct {
	// Unverified certificate chain parsed from x5c header.
	certificates []*x509.Certificate

-	// Any headers not recognised above get unmarshalled
-	// from JSON in a generic manner and placed in this map.
+	// At parse time, each header parameter with a name other than "kid",
+	// "jwk", "alg", "nonce", or "x5c" will have its value passed to
+	// [json.Unmarshal] to unmarshal it into an interface value.
+	// The resulting value will be stored in this map, with the header
+	// parameter name as the key.
+	//
+	// [json.Unmarshal]: https://pkg.go.dev/encoding/json#Unmarshal
	ExtraHeaders map[HeaderKey]interface{}
}
59 vendor/github.com/go-jose/go-jose/v3/signing.go generated vendored
@ -40,6 +40,15 @@ type Signer interface {
}

// SigningKey represents an algorithm/key used to sign a message.
+//
+// Key must have one of these types:
+//  - ed25519.PrivateKey
+//  - *ecdsa.PrivateKey
+//  - *rsa.PrivateKey
+//  - *JSONWebKey
+//  - JSONWebKey
+//  - []byte (an HMAC key)
+//  - Any type that satisfies the OpaqueSigner interface
type SigningKey struct {
	Algorithm SignatureAlgorithm
	Key       interface{}
@ -52,12 +61,22 @@ type SignerOptions struct {

	// Optional map of additional keys to be inserted into the protected header
	// of a JWS object. Some specifications which make use of JWS like to insert
-	// additional values here. All values must be JSON-serializable.
+	// additional values here.
+	//
+	// Values will be serialized by [json.Marshal] and must be valid inputs to
+	// that function.
+	//
+	// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
	ExtraHeaders map[HeaderKey]interface{}
}

// WithHeader adds an arbitrary value to the ExtraHeaders map, initializing it
-// if necessary. It returns itself and so can be used in a fluent style.
+// if necessary, and returns the updated SignerOptions.
+//
+// The v argument will be serialized by [json.Marshal] and must be a valid
+// input to that function.
+//
+// [json.Marshal]: https://pkg.go.dev/encoding/json#Marshal
func (so *SignerOptions) WithHeader(k HeaderKey, v interface{}) *SignerOptions {
	if so.ExtraHeaders == nil {
		so.ExtraHeaders = map[HeaderKey]interface{}{}
@ -173,11 +192,11 @@ func newVerifier(verificationKey interface{}) (payloadVerifier, error) {
		return newVerifier(verificationKey.Key)
	case *JSONWebKey:
		return newVerifier(verificationKey.Key)
+	case OpaqueVerifier:
+		return &opaqueVerifier{verifier: verificationKey}, nil
+	default:
+		return nil, ErrUnsupportedKeyType
	}
-	if ov, ok := verificationKey.(OpaqueVerifier); ok {
-		return &opaqueVerifier{verifier: ov}, nil
-	}
-	return nil, ErrUnsupportedKeyType
}

func (ctx *genericSigner) addRecipient(alg SignatureAlgorithm, signingKey interface{}) error {
@ -204,11 +223,11 @@ func makeJWSRecipient(alg SignatureAlgorithm, signingKey interface{}) (recipient
		return newJWKSigner(alg, signingKey)
	case *JSONWebKey:
		return newJWKSigner(alg, *signingKey)
+	case OpaqueSigner:
+		return newOpaqueSigner(alg, signingKey)
+	default:
+		return recipientSigInfo{}, ErrUnsupportedKeyType
	}
-	if signer, ok := signingKey.(OpaqueSigner); ok {
-		return newOpaqueSigner(alg, signer)
-	}
-	return recipientSigInfo{}, ErrUnsupportedKeyType
}

func newJWKSigner(alg SignatureAlgorithm, signingKey JSONWebKey) (recipientSigInfo, error) {
@ -321,12 +340,21 @@ func (ctx *genericSigner) Options() SignerOptions {
}

// Verify validates the signature on the object and returns the payload.
-// This function does not support multi-signature, if you desire multi-sig
+// This function does not support multi-signature. If you desire multi-signature
// verification use VerifyMulti instead.
//
// Be careful when verifying signatures based on embedded JWKs inside the
// payload header. You cannot assume that the key received in a payload is
// trusted.
+//
+// The verificationKey argument must have one of these types:
+//  - ed25519.PublicKey
+//  - *ecdsa.PublicKey
+//  - *rsa.PublicKey
+//  - *JSONWebKey
+//  - JSONWebKey
+//  - []byte (an HMAC key)
+//  - Any type that implements the OpaqueVerifier interface.
func (obj JSONWebSignature) Verify(verificationKey interface{}) ([]byte, error) {
	err := obj.DetachedVerify(obj.payload, verificationKey)
	if err != nil {
@ -346,6 +374,9 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte {
// most cases, you will probably want to use Verify instead. DetachedVerify
// is only useful if you have a payload and signature that are separated from
// each other.
+//
+// The verificationKey argument must have one of the types allowed for the
+// verificationKey argument of JSONWebSignature.Verify().
func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error {
	key := tryJWKS(verificationKey, obj.headers()...)
	verifier, err := newVerifier(key)
@ -388,6 +419,9 @@ func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey inter
// returns the index of the signature that was verified, along with the signature
// object and the payload. We return the signature and index to guarantee that
// callers are getting the verified value.
+//
+// The verificationKey argument must have one of the types allowed for the
+// verificationKey argument of JSONWebSignature.Verify().
func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signature, []byte, error) {
	idx, sig, err := obj.DetachedVerifyMulti(obj.payload, verificationKey)
	if err != nil {
@ -405,6 +439,9 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signa
// DetachedVerifyMulti is only useful if you have a payload and signature that are
// separated from each other, and the signature can have multiple signers at the
// same time.
+//
+// The verificationKey argument must have one of the types allowed for the
+// verificationKey argument of JSONWebSignature.Verify().
func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) {
	key := tryJWKS(verificationKey, obj.headers()...)
	verifier, err := newVerifier(key)
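A sign/verify round trip using one of the documented key types (a sketch; ed25519 keys are valid for signing per the list above):

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.EdDSA, Key: priv}, nil)
	if err != nil {
		panic(err)
	}
	obj, err := signer.Sign([]byte("signed message"))
	if err != nil {
		panic(err)
	}

	// Verify takes the matching public key type (ed25519.PublicKey here).
	payload, err := obj.Verify(pub)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", payload)
}
```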
15 vendor/github.com/go-jose/go-jose/v3/symmetric.go generated vendored
@ -40,12 +40,17 @@ var RandReader = rand.Reader

const (
	// RFC7518 recommends a minimum of 1,000 iterations:
-	// https://tools.ietf.org/html/rfc7518#section-4.8.1.2
+	//  - https://tools.ietf.org/html/rfc7518#section-4.8.1.2
+	//
	// NIST recommends a minimum of 10,000:
-	// https://pages.nist.gov/800-63-3/sp800-63b.html
-	// 1Password uses 100,000:
-	// https://support.1password.com/pbkdf2/
-	defaultP2C = 100000
+	//  - https://pages.nist.gov/800-63-3/sp800-63b.html
+	//
+	// 1Password increased in 2023 from 100,000 to 650,000:
+	//  - https://support.1password.com/pbkdf2/
+	//
+	// OWASP recommended 600,000 in Dec 2022:
+	//  - https://cheatsheetseries.owasp.org/cheatsheets/Password_Storage_Cheat_Sheet.html#pbkdf2
+	defaultP2C = 600000
	// Default salt size: 128 bits
	defaultP2SSize = 16
)
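Callers that need a different PBKDF2 cost than the new 600,000-iteration default can set it per recipient via the Recipient.PBES2Count field shown earlier in this diff. A hedged sketch (the passphrase and count are examples):

```go
package example

import (
	jose "github.com/go-jose/go-jose/v3"
)

// newPasswordEncrypter is a sketch of password-based key wrap with an
// explicit iteration count.
func newPasswordEncrypter() (jose.Encrypter, error) {
	return jose.NewEncrypter(jose.A128GCM, jose.Recipient{
		Algorithm:  jose.PBES2_HS256_A128KW,
		Key:        []byte("correct horse battery staple"), // demo passphrase
		PBES2Count: 600000,                                 // explicit; matches the new default
	}, nil)
}
```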
3 vendor/github.com/go-openapi/strfmt/format.go generated vendored
@ -16,6 +16,7 @@ package strfmt

import (
	"encoding"
+	stderrors "errors"
	"fmt"
	"reflect"
	"strings"
@ -117,7 +118,7 @@ func (f *defaultFormats) MapStructureHookFunc() mapstructure.DecodeHookFunc {
		case "datetime":
			input := data
			if len(input) == 0 {
-				return nil, fmt.Errorf("empty string is an invalid datetime format")
+				return nil, stderrors.New("empty string is an invalid datetime format")
			}
			return ParseDateTime(input)
		case "duration":
13 vendor/github.com/google/uuid/CHANGELOG.md generated vendored
@ -1,5 +1,18 @@
# Changelog

+## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
+
+
+### Features
+
+* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
+
+
+### Bug Fixes
+
+* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
+* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
+
## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
6 vendor/github.com/google/uuid/hash.go generated vendored
@ -17,6 +17,12 @@ var (
	NameSpaceOID  = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
	NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
	Nil           UUID // empty UUID, all zeros
+
+	// The Max UUID is special form of UUID that is specified to have all 128 bits set to 1.
+	Max = UUID{
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	}
)

// NewHash returns a new UUID derived from the hash of space concatenated with
39 vendor/github.com/google/uuid/version7.go generated vendored
@ -44,7 +44,7 @@ func NewV7FromReader(r io.Reader) (UUID, error) {
|
||||
|
||||
// makeV7 fill 48 bits time (uuid[0] - uuid[5]), set version b0111 (uuid[6])
|
||||
// uuid[8] already has the right version number (Variant is 10)
|
||||
// see function NewV7 and NewV7FromReader
|
||||
// see function NewV7 and NewV7FromReader
|
||||
func makeV7(uuid []byte) {
|
||||
/*
|
||||
0 1 2 3
|
||||
@ -52,7 +52,7 @@ func makeV7(uuid []byte) {
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| unix_ts_ms |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| unix_ts_ms | ver | rand_a |
|
||||
| unix_ts_ms | ver | rand_a (12 bit seq) |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
|var| rand_b |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
@ -61,7 +61,7 @@ func makeV7(uuid []byte) {
|
||||
*/
|
||||
_ = uuid[15] // bounds check
|
||||
|
||||
t := timeNow().UnixMilli()
|
||||
t, s := getV7Time()
|
||||
|
||||
uuid[0] = byte(t >> 40)
|
||||
uuid[1] = byte(t >> 32)
|
||||
@ -70,6 +70,35 @@ func makeV7(uuid []byte) {
|
||||
uuid[4] = byte(t >> 8)
|
||||
uuid[5] = byte(t)
|
||||
|
||||
uuid[6] = 0x70 | (uuid[6] & 0x0F)
|
||||
// uuid[8] has already has right version
|
||||
uuid[6] = 0x70 | (0x0F & byte(s>>8))
|
||||
uuid[7] = byte(s)
|
||||
}
|
||||
|
||||
// lastV7time is the last time we returned stored as:
|
||||
//
|
||||
// 52 bits of time in milliseconds since epoch
|
||||
// 12 bits of (fractional nanoseconds) >> 8
|
||||
var lastV7time int64
|
||||
|
||||
const nanoPerMilli = 1000000
|
||||
|
||||
// getV7Time returns the time in milliseconds and nanoseconds / 256.
|
||||
// The returned (milli << 12 + seq) is guarenteed to be greater than
|
||||
// (milli << 12 + seq) returned by any previous call to getV7Time.
|
||||
func getV7Time() (milli, seq int64) {
|
||||
timeMu.Lock()
|
||||
defer timeMu.Unlock()
|
||||
|
||||
nano := timeNow().UnixNano()
|
||||
milli = nano / nanoPerMilli
|
||||
// Sequence number is between 0 and 3906 (nanoPerMilli>>8)
|
||||
seq = (nano - milli*nanoPerMilli) >> 8
|
||||
now := milli<<12 + seq
|
||||
if now <= lastV7time {
|
||||
now = lastV7time + 1
|
||||
milli = now >> 12
|
||||
seq = now & 0xfff
|
||||
}
|
||||
lastV7time = now
|
||||
return milli, seq
|
||||
}
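getV7Time folds the millisecond timestamp and a 12-bit sub-millisecond sequence into one counter (milli<<12 + seq) and bumps it whenever it would not otherwise increase, so consecutive v7 UUIDs from one process sort in creation order even within the same millisecond. A quick standalone check of that ordering guarantee (not part of the vendored code):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// The canonical string form of a UUID preserves byte order, so strictly
	// increasing strings imply strictly increasing UUIDs.
	prev := uuid.Must(uuid.NewV7())
	for i := 0; i < 1000; i++ {
		next := uuid.Must(uuid.NewV7())
		if next.String() <= prev.String() {
			fmt.Println("ordering violated:", prev, next)
			return
		}
		prev = next
	}
	fmt.Println("1000 consecutive v7 UUIDs were strictly increasing")
}
```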
10 vendor/github.com/klauspost/compress/README.md (generated, vendored)
@ -16,10 +16,14 @@ This package provides various compression algorithms.

# changelog

* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6)
* zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923
* s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925

* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5)
* flate: Fix reset with dictionary on custom window encodes by @klauspost in https://github.com/klauspost/compress/pull/912
* zstd: Add Frame header encoding and stripping by @klauspost in https://github.com/klauspost/compress/pull/908
* zstd: Limit better/best default window to 8MB by @klauspost in https://github.com/klauspost/compress/pull/913
* flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912
* zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908
* zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913
* zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910
* s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917
https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918
13 vendor/github.com/opencontainers/runc/LICENSE → vendor/github.com/moby/sys/user/LICENSE (generated, vendored)
@ -176,7 +176,18 @@

END OF TERMS AND CONDITIONS

Copyright 2014 Docker, Inc.
APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
17 vendor/github.com/opencontainers/runc/NOTICE (generated, vendored)
@ -1,17 +0,0 @@
runc

Copyright 2012-2015 Docker, Inc.

This product includes software developed at Docker, Inc. (http://www.docker.com).

The following is courtesy of our legal counsel:

Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see http://www.bis.doc.gov

See also http://www.apache.org/dev/crypto.html and/or seek legal counsel.
10 vendor/github.com/opencontainers/runtime-spec/specs-go/config.go (generated, vendored)
@ -187,6 +187,10 @@ type Hook struct {
type Hooks struct {
// Prestart is Deprecated. Prestart is a list of hooks to be run before the container process is executed.
// It is called in the Runtime Namespace
//
// Deprecated: use [Hooks.CreateRuntime], [Hooks.CreateContainer], and
// [Hooks.StartContainer] instead, which allow more granular hook control
// during the create and start phase.
Prestart []Hook `json:"prestart,omitempty"`
// CreateRuntime is a list of hooks to be run after the container has been created but before pivot_root or any equivalent operation has been called
// It is called in the Runtime Namespace
@ -371,6 +375,12 @@ type LinuxMemory struct {
// Total memory limit (memory + swap).
Swap *int64 `json:"swap,omitempty"`
// Kernel memory limit (in bytes).
//
// Deprecated: kernel-memory limits are not supported in cgroups v2, and
// were obsoleted in [kernel v5.4]. This field should no longer be used,
// as it may be ignored by runtimes.
//
// [kernel v5.4]: https://github.com/torvalds/linux/commit/0158115f702b0ba208ab0
Kernel *int64 `json:"kernel,omitempty"`
// Kernel memory limit for tcp (in bytes)
KernelTCP *int64 `json:"kernelTCP,omitempty"`
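Per the deprecation notes above, new configs should populate the CreateRuntime/CreateContainer/StartContainer hook lists instead of Prestart, and leave the kernel-memory fields unset. A hedged sketch (the hook binary paths are hypothetical):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// The point is the field choice: CreateRuntime/StartContainer rather
	// than the deprecated Prestart.
	hooks := specs.Hooks{
		CreateRuntime:  []specs.Hook{{Path: "/usr/local/bin/netns-setup"}},
		StartContainer: []specs.Hook{{Path: "/usr/local/bin/notify-start"}},
	}

	out, err := json.MarshalIndent(hooks, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
```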
2 vendor/github.com/opencontainers/runtime-spec/specs-go/version.go (generated, vendored)
@ -6,7 +6,7 @@ const (
// VersionMajor is for an API incompatible changes
VersionMajor = 1
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 1
VersionMinor = 2
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0
114 vendor/github.com/sigstore/sigstore/pkg/oauth/interactive.go (generated, vendored)
@ -15,8 +15,120 @@
// Package oauth implements OAuth/OIDC support for device and token flows
package oauth

import (
"bytes"
"fmt"
"text/template"
)

// GetInteractiveSuccessHTML is the page displayed upon success when using a web browser during an interactive Oauth token flow.
// The page will close automatically if autoclose is true with the timeout specified.
func GetInteractiveSuccessHTML(autoclose bool, timeout int) (string, error) {
const successTemplate = `<!DOCTYPE html>
<html>
<head>
<title>Sigstore Authentication</title>
<link id="favicon" rel="icon" type="image/svg"/>
<style>
:root { font-family: "Trebuchet MS", sans-serif; height: 100%; color: #444444; overflow: hidden; }
body { display: flex; justify-content: center; height: 100%; margin: 0 10%; background: #FFEAD7; }
.container { display: flex; flex-direction: column; justify-content: space-between; }
.sigstore { color: #2F2E71; font-weight: bold; }
.header { position: absolute; top: 30px; left: 22px; }
.title { font-size: 3.5em; margin-bottom: 30px; animation: 750ms ease-in-out 0s 1 show; }
.content { font-size: 1.5em; animation: 250ms hide, 750ms ease-in-out 250ms 1 show; }
.anchor { position: relative; }
.links { display: flex; justify-content: space-between; font-size: 1.2em; padding: 60px 0; position: absolute; bottom: 0; left: 0; right: 0; animation: 500ms hide, 750ms ease-in-out 500ms 1 show; }
.link { color: #444444; text-decoration: none; user-select: none; }
.link:hover { color: #6349FF; }
.link:hover>.arrow { transform: scaleX(1.5) translateX(3px); }
.link:hover>.sigstore { color: inherit; }
.link, .arrow { transition: 200ms; }
.arrow { display: inline-block; margin-left: 6px; transform: scaleX(1.5); }
@keyframes hide { 0%, 100% { opacity: 0; } }
@keyframes show { 0% { opacity: 0; transform: translateY(40px); } 100% { opacity: 1; } }
</style>
</head>
<body>
<div class="container">
<div>
<a class="header" href="https://sigstore.dev">
<svg id="logo" xmlns="http://www.w3.org/2000/svg" xml:space="preserve" width="28.14" height="30.3">
<circle r="7" cx="14" cy="15" fill="#FFEAD7"></circle>
<path fill="#2F2E71" d="M27.8 10.9c-.3-1.2-.9-2.2-1.7-3.1-.6-.7-1.3-1.3-2-2-.7-.6-1.2-1.3-1.5-2.1-.2-.4-.4-.8-.7-1.2-.5-.7-1.3-1.2-2.1-1.6-1.3-.7-2.7-.9-4.2-.9-.8 0-1.6.1-2.4.3-1.2.2-2.3.7-3.4 1.3-.7.4-1.3.9-1.9 1.4-1 .8-2 1.6-2.8 2.6-.6.8-1.4 1.3-2.2 1.8-.8.4-1.4 1-2 1.6-.6.6-.9 1.3-.9 2.1 0 .6.1 1.2.2 1.7.2.9.6 1.7.9 2.6.2.5.3 1 .3 1.5s0 1-.1 1.5c-.1 1.1 0 2.3.2 3.4.2 1 .8 1.8 1.8 2.2.1.1.3.1.4.1.2.1.2.2.1.3l-.1.1c-.4.5-.7 1.1-.6 1.8.1 1.1 1.3 1.8 2.3 1.3.6-.2 1.2 0 1.4.4.1.1.1.2.2.3.2.5.4.9.7 1.3.4.5.9.7 1.6.6.4-.1.8-.2 1.2-.4.7-.4 1.3-.9 2-1.5.2-.2.4-.2.7-.2.4 0 .8.2 1.2.5.6.4 1.2.7 1.9.9 1.3.4 2.5.5 3.8.2 1.3-.3 2.4-.9 3.4-1.6.7-.5 1.2-1 1.6-1.7.4-.7.6-1.4.8-2.2.3-1.1.4-2.2.4-3.4.1-1 .2-1.9.5-2.8.2-.7.5-1.4.8-2.1.2-.6.4-1.2.5-1.9.1-1.1 0-2.1-.3-3.1zM14.9.8c.3-.1.7-.1 1-.1h.3c1.1 0 2.1.2 3.1.5.6.2 1.2.6 1.7 1s.7.9.9 1.4v.1c0 .1 0 .2-.1.2s-.1 0-.2-.1c-.4-.4-.7-.8-1.1-1.1-.6-.5-1.2-.9-2-1.1-1.1-.3-2.1-.5-3.2-.7h-.6c.1 0 .1 0 .2-.1-.1 0 0 0 0 0zm-4.5 12.4c.6 0 1.1.5 1.2 1.2 0 .6-.5 1.2-1.2 1.2-.6 0-1.2-.5-1.1-1.2 0-.7.5-1.2 1.1-1.2zm3.8 1.3v-3.4c0-2.3 2-3.1 3.6-2.5.3.1.6.3.9.5.2.2.2.5.1.8-.2.2-.4.3-.7.1-.2-.1-.5-.2-.7-.3-.6-.2-1.3 0-1.6.4-.1.2-.2.4-.2.7-.1.5 0 .9 0 1.4v5.9c0 1.2-.6 2.1-1.8 2.4-1 .3-1.9.2-2.7-.6-.2-.2-.3-.5-.1-.7.1-.2.4-.3.7-.2.3.1.6.3.9.4 1 .1 1.7-.3 1.7-1.4-.1-1.2-.1-2.3-.1-3.5zm-8.8 7.6h-.1c-.1-.1-.2-.1-.3-.2-.2-.2-.4-.3-.6-.5-.3-.3-.5-.6-.7-1-.4-.8-.8-1.7-1-2.7-.1-.5-.2-1-.2-1.5s-.1-1-.2-1.4c-.1-.7-.2-1.5-.2-2.2 0-.9.1-1.7.4-2.5.3-.9.7-1.7 1.4-2.4.6-.6 1.1-1.2 1.7-1.8.1-.1.3-.2.4-.2 0 .1-.1.3-.2.4-.3.4-.6.7-.9 1.1-.5.6-.9 1.2-1.2 1.8-.4.7-.7 1.4-.9 2.2-.1.4-.2.8-.2 1.2 0 .4-.1.8 0 1.3 0 .6.1 1.1.2 1.6.1.6.2 1.1.2 1.7 0 .7.2 1.4.4 2.1 0 .2.2.3.2.5.3.6.6 1.1 1.1 1.5.2.2.4.5.6.7 0 0 0 .1.1.1v.2zM8 24.6c-.4 0-.7.1-1.1.2-.4.1-.6-.1-.7-.5 0-.1-.1-.3 0-.4.1-.3.3-.3.5-.1.2.2.5.4.7.5.1.1.2.1.4.1.1 0 .2.1.4.2H8zm7.6 2.1c-.3.2-.7.3-1.1.3-.3 0-.6-.1-.9-.1h-.2c-.4.1-.7.1-1.1.2-.1 0-.3 0-.4.1H11c-.4 0-.7-.2-1-.5-.1-.1-.2-.3-.3-.5-.1-.1-.1-.2-.1-.4 0-.1.1-.1.2-.1h.1c.5.3 1.1.4 1.6.5.7.1 1.4.2 2.1.2.4 0 .7.1 1.1.1h.8c.2.1.1.1.1.2zm3.7-2.5c-.7.4-1.5.7-2.3.9-.2 0-.5.1-.7.1-.2 0-.5 0-.7.1-.4.1-.8 0-1.2 0-.3 0-.6-.1-.9 0h-.2c-.4-.1-.9-.2-1.3-.3-.5-.1-1-.3-1.4-.5-.4-.1-.8-.3-1.1-.5-.2-.1-.4-.3-.6-.4-.6-.6-1.2-1.1-1.7-1.6-.4-.5-.8-.9-1.2-1.4-.4-.6-.7-1.2-1-1.9l-.3-.9c-.1-.3-.2-.5-.2-.8v-.8c.3.8.5 1.7.9 2.5.7 1.6 1.7 3 3 4.1 1.4 1.1 2.9 1.8 4.6 2.1.9.2 1.8.2 2.7.2 1.1-.1 2.2-.3 3.2-.8.2-.1.3-.2.5-.2 0 .1 0 .1-.1.1zm.1-8.7c-.6 0-1.1-.5-1.1-1.2 0-.6.5-1.2 1.2-1.2.6 0 1.1.5 1.1 1.2s-.5 1.3-1.2 1.2zm6.2 5.7c0 .4-.1.8-.2 1.2-.1.4-.1.9-.3 1.3-.1.4-.2.7-.4 1.1-.1.3-.3.6-.6.8-.3.2-.5.4-.9.5-.4.2-.7.3-1.2.3h-.9c-.2-.1-.2-.1-.1-.3.1-.2.3-.3.5-.4.3-.2.6-.5.8-.7.7-.7 1.3-1.6 1.9-2.4.4-.4.6-1 .9-1.5.1-.1.1-.2.2-.3.3.2.3.3.3.4zm-15-16.8c1.7-.8 3.5-1.1 5.3-.9.4 0 .8.1 1.1.3l1.8.6c.6.2 1.2.5 1.7.8.7.4 1.3.9 1.9 1.5.8.8 1.5 1.6 2 2.6.3.6.5 1.2.7 1.8.2.7.4 1.5.4 2.2v.9c0 .4-.1.8-.1 1.2v-1c0-1.2-.3-2.3-.6-3.4l-.6-1.5c-.2-.6-.5-1.1-.9-1.6-.1-.1-.3-.1-.4-.2-.1 0-.1 0-.2-.1-.5-.5-1.1-1-1.7-1.5-.8-.6-1.7-1.1-2.6-1.4-.4-.2-.8-.3-1.2-.4-.9-.2-1.8-.4-2.7-.3h-.9c-.3 0-.6.1-1 .2-.6.1-1.2.3-1.7.5h-.1s-.1 0 0-.1c0 0 0-.1-.1-.2m16.2 11.1c-.1-.8 0-1.7 0-2.5-.1-.8-.2-1.6-.4-2.4.5.7.6 1.6.7 2.4 0 .8 0 1.7-.3 2.5zm.6.5c0-.3.1-.7.2-1.1.1-.4.1-.9.1-1.3v-.4c0-.8-.2-1.6-.4-2.4-.4-.9-.8-1.6-1.4-2.4-.5-.6-.9-1.2-1.4-1.8l-.2-.2c.1 0 .1 0 .1.1 1 .8 1.8 1.6 2.4 2.7.5 1 .9 2 1 3.1.3 1.3.1 2.5-.4 3.7z"/>
</svg><svg xmlns="http://www.w3.org/2000/svg" xml:space="preserve" width="120" height="30.3" viewBox="28.14 0 120 30.3">
<path fill="#2F2E71" d="M57.7 18c.9 0 1.9-.1 2.9.3.9.3 1.5.9 1.7 2 .1 1-.2 1.9-1.1 2.5-1.1.8-2.3.9-3.6 1-1.4 0-2.9 0-4.3-.6-1.6-.7-1.8-2.6-.4-3.6.3-.2.2-.3.1-.5-.7-.8-.7-2.2.3-2.8.3-.2.2-.3 0-.6-1.4-1.6-.7-4.1 1.3-4.8 1.6-.6 3.2-.6 4.8-.1.2.1.3.1.5-.1.3-.3.6-.5.9-.7.5-.3 1.1-.3 1.4 0 .3.4.3.9-.2 1.3-.7.5-.8 1-.6 1.9.4 1.5-.6 2.9-2.1 3.4-1.3.5-2.7.5-4 .2-.3-.1-.5-.1-.6.2-.1.3-.1.6.2.8.2.1.5.2.8.2h2zm-.6-2.7c.3 0 .5 0 .7-.1.8-.2 1.3-.7 1.4-1.4.1-.7-.3-1.3-.9-1.6-.8-.3-1.6-.3-2.4 0-1 .4-1.2 1.7-.6 2.4.5.6 1.2.7 1.8.7zm-.2 4.6h-1.8c-.4 0-.6.2-.7.5-.3.7.1 1.2.9 1.4 1.2.2 2.5.2 3.7-.1l.6-.3c.5-.3.4-1.1-.1-1.3-.2-.1-.5-.2-.7-.2h-1.9zm58.6-3.3h-3.2c-.3 0-.3.1-.3.4.2 1.2 1.3 2.1 2.8 2.3 1.1.1 2.1-.1 2.9-.9.2-.2.4-.3.6-.4.4-.2.8-.2 1.1.1.4.3.4.7.3 1.1-.3.9-.9 1.4-1.7 1.8-2.3 1-4.5.9-6.5-.6-1-.7-1.5-1.8-1.7-3-.3-1.7-.2-3.3.8-4.7 1.3-1.8 3.1-2.4 5.2-2.1 2 .3 3.4 1.3 4 3.2.2.6.3 1.2.2 1.8-.1.8-.4 1.1-1.2 1.1-1.1-.1-2.2-.1-3.3-.1zm-.7-1.9h2.4c.4 0 .4-.1.4-.5-.3-1.1-1.2-1.9-2.5-2-1.5-.1-2.6.6-3.1 1.9-.2.5-.1.6.4.6h2.4zm-23 6.7c-3.3 0-5.5-2.2-5.6-5.5 0-3.2 2.2-5.6 5.4-5.6 3.4 0 5.7 2.2 5.8 5.5 0 3.4-2.2 5.6-5.6 5.6zm0-2.1c1.9 0 3.2-1.3 3.3-3.3.1-2-1.3-3.4-3.2-3.4-1.8 0-3.2 1.4-3.2 3.3-.1 1.9 1.2 3.4 3.1 3.4zm-22.6 2.1c-1.1 0-2.4-.3-3.5-1.4-.3-.4-.6-.8-.7-1.3 0-.4.1-.7.4-.9.3-.2.7-.2 1 0 .3.2.6.4.8.6.9.9 2 1.1 3.2.9.6-.1 1-.5 1-1 .1-.5-.2-1-.8-1.2-.7-.3-1.4-.3-2.1-.5-.8-.2-1.6-.4-2.2-.9-1.5-1.2-1.4-3.5.2-4.6 1.2-.8 2.6-.9 4-.7 1 .1 1.9.5 2.6 1.2.3.3.4.6.5.9.1.4 0 .7-.3 1-.3.3-.7.3-1 .1-.4-.2-.7-.5-1.1-.8-.8-.6-1.7-.8-2.7-.5-.6.1-.9.5-.9 1s.3.9.8 1.1c.9.3 1.9.4 2.9.7.6.2 1.1.4 1.5.8 1.5 1.4 1.1 4.1-.9 5.1-.7.3-1.5.4-2.7.4zm-30.3 0c-1.5 0-3-.3-4.1-1.5-.3-.3-.4-.6-.5-1-.1-.4-.1-.8.3-1.1.4-.2.9-.2 1.3.1.3.2.6.5.8.7.9.8 1.9.9 3 .7.6-.1.9-.5.9-1.1 0-.5-.3-1-.8-1.2l-2.7-.6c-1.1-.3-2-.8-2.4-2-.6-1.7.4-3.4 2.2-3.9 1.7-.5 3.3-.3 4.8.5.6.3 1 .8 1.2 1.4.1.4.1.9-.3 1.1-.4.3-.8.2-1.2-.1-.4-.3-.7-.7-1.2-.9-.8-.4-1.5-.5-2.4-.3-.5.1-.8.4-.8.9s.2.8.7 1c.7.3 1.4.3 2.1.5.8.2 1.5.3 2.2.8 1.2.8 1.5 2.5.9 3.8-.7 1.4-1.9 1.8-3.3 2-.3.2-.5.2-.7.2zM78 15.8v-2.6c0-.3-.1-.4-.4-.4h-1.3c-.6-.1-.9-.5-.9-1s.4-1 .9-1h1.3c.3 0 .4-.1.4-.4V8.9c0-.7.5-1.1 1.1-1.2.6 0 1.1.4 1.2 1v.6c0 .4-.2 1 .1 1.3.3.3.9.1 1.3.1h1.6c.4 0 .7.3.8.8.1.4-.1.8-.4 1.1-.3.2-.6.2-.9.2h-2.1c-.3 0-.4 0-.4.4v4.7c0 1.1.9 1.6 1.9 1.1.3-.1.5-.3.7-.5.4-.3.8-.3 1.2 0 .4.3.4.8.2 1.2-.3.7-.9 1.1-1.5 1.3-1.3.5-2.6.5-3.8-.4-.7-.6-1-1.4-1.1-2.4.1-.7.1-1.6.1-2.4zm24.7-4.1c.8-1 1.8-1.4 3-1.3.7.1 1.4.3 1.9.9.3.4.5.9.4 1.5-.1.4-.3.7-.7.9-.5.2-.9 0-1.3-.3-.7-.7-1.3-.9-2.1-.5-.5.3-.8.7-1 1.3-.2.5-.3 1.1-.3 1.6v4.5c0 .5-.2.9-.6 1.1-.4.2-.8.2-1.2-.1-.4-.3-.5-.7-.5-1.1v-8.4c0-.7.4-1.2 1-1.3.7-.1 1.1.3 1.3 1.1.1-.1.1 0 .1.1zm-54 4.2v4.3c0 .8-.6 1.3-1.4 1.2-.5-.1-.9-.5-.9-1.1v-8.7c0-.7.5-1.1 1.2-1.1s1.1.4 1.2 1.2c-.1 1.3-.1 2.8-.1 4.2zm.3-8.2c0 .8-.6 1.4-1.4 1.4-.8 0-1.5-.6-1.5-1.4 0-.8.7-1.4 1.5-1.4s1.4.6 1.4 1.4z"/>
</svg>
</a>
</div>
<div>
<div class="title">
<span class="sigstore">sigstore </span>
<span>authentication successful!</span>
</div>
{{ if .Autoclose -}}
<small name="autoclose"></small>
<noscript>
<div class="content">
<span>You may now close this page.</span>
</div>
</noscript>
{{- else -}}
<div class="content">
<span>You may now close this page.</span>
</div>
{{- end }}
</div>
<div class="anchor">
<div class="links">
<a href="https://sigstore.dev/" class="link login"><span class="sigstore">sigstore</span> home <span class="arrow">→</span></a>
<a href="https://docs.sigstore.dev/" class="link login"><span class="sigstore">sigstore</span> documentation <span class="arrow">→</span></a>
<a href="https://blog.sigstore.dev/" class="link"><span class="sigstore">sigstore</span> blog <span class="arrow">→</span></a>
</div>
</div>
</div>
<script>
document.getElementById("favicon").setAttribute("href", "data:image/svg+xml," + encodeURIComponent(document.getElementById("logo").outerHTML));
</script>

{{ if .Autoclose -}}
<script>
var timeout = {{ .Timeout }};
setTimeout(function() { this.close(); }, timeout*1000);
setInterval(function() {
timeout--;
document.getElementsByName("autoclose")[0].innerHTML = "This page will close automatically in " + timeout + " seconds...";
if (timeout <= 0) {
document.getElementsByName("autoclose")[0].innerHTML = "This page will close now, thank you!";
}
}, 1000);
</script>
{{- end }}
</body>
</html>
`
// Parse the template
tmpl, err := template.New("success").Parse(successTemplate)
if err != nil {
return "", fmt.Errorf("error parsing success template: %w", err)
}
// Pass autoclose and timeout to the template
data := struct {
Autoclose bool
Timeout int
}{
autoclose,
timeout,
}
var htmlPage bytes.Buffer
if err := tmpl.Execute(&htmlPage, data); err != nil {
return "", fmt.Errorf("error executing template: %w", err)
}
return htmlPage.String(), nil
}
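A usage sketch for the new function (not from the vendored code): the two arguments feed the template's .Autoclose and .Timeout fields, so a caller can choose between a self-closing page and a static one.

```go
package main

import (
	"fmt"
	"log"

	"github.com/sigstore/sigstore/pkg/oauth"
)

func main() {
	// autoclose=true renders the countdown script with .Timeout = 10,
	// so the page closes itself after ten seconds.
	html, err := oauth.GetInteractiveSuccessHTML(true, 10)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("rendered %d bytes of success HTML\n", len(html))
}
```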
const (
// InteractiveSuccessHTML is the page displayed upon success when using a web browser during an interactive Oauth token flow.
// InteractiveSuccessHTML (deprecated) is the page displayed upon success when using a web browser during an interactive Oauth token flow.
InteractiveSuccessHTML = `<!DOCTYPE html>
<html>
<head>
156 vendor/github.com/sigstore/sigstore/pkg/oauthflow/client_credentials.go (generated, vendored, new file)
@ -0,0 +1,156 @@
//
// Copyright 2024 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package oauthflow

import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strings"
"time"

"github.com/coreos/go-oidc/v3/oidc"
"golang.org/x/oauth2"
)

// CodeURL fetches the client credentials token authorization endpoint URL from the provider's well-known configuration endpoint
func (d *DefaultFlowClientCredentials) CodeURL() (string, error) {
if d.codeURL != "" {
return d.codeURL, nil
}

wellKnown := strings.TrimSuffix(d.Issuer, "/") + "/.well-known/openid-configuration"
/* #nosec */
httpClient := &http.Client{
Timeout: 3 * time.Second,
}
resp, err := httpClient.Get(wellKnown)
if err != nil {
return "", err
}
defer resp.Body.Close()

body, err := io.ReadAll(resp.Body)
if err != nil {
return "", fmt.Errorf("unable to read response body: %w", err)
}

if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("%s: %s", resp.Status, body)
}

providerConfig := struct {
Issuer string `json:"issuer"`
TokenEndpoint string `json:"token_endpoint"`
}{}
if err = json.Unmarshal(body, &providerConfig); err != nil {
return "", fmt.Errorf("oidc: failed to decode provider discovery object: %w", err)
}

if d.Issuer != providerConfig.Issuer {
return "", fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", d.Issuer, providerConfig.Issuer)
}

if providerConfig.TokenEndpoint == "" {
return "", fmt.Errorf("oidc: client credentials token authorization endpoint not returned by provider")
}

d.codeURL = providerConfig.TokenEndpoint
return d.codeURL, nil
}

// DefaultFlowClientCredentials fetches an OIDC Identity token using the Client Credentials Grant flow as specified in RFC8628
type DefaultFlowClientCredentials struct {
Issuer string
codeURL string
}

// NewClientCredentialsFlow creates a new DefaultFlowClientCredentials that retrieves an OIDC Identity Token using a Client Credentials Grant
func NewClientCredentialsFlow(issuer string) *DefaultFlowClientCredentials {
return &DefaultFlowClientCredentials{
Issuer: issuer,
}
}

func (d *DefaultFlowClientCredentials) clientCredentialsFlow(_ *oidc.Provider, clientID, clientSecret, redirectURL string) (string, error) {
data := url.Values{
"client_id": []string{clientID},
"client_secret": []string{clientSecret},
"scope": []string{"openid email"},
"grant_type": []string{"client_credentials"},
}
if redirectURL != "" {
// If a redirect uri is provided then use it
data["redirect_uri"] = []string{redirectURL}
}

codeURL, err := d.CodeURL()
if err != nil {
return "", err
}
/* #nosec */
resp, err := http.PostForm(codeURL, data)
if err != nil {
return "", err
}
defer resp.Body.Close()

b, err := io.ReadAll(resp.Body)
if err != nil {
return "", err
}
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("%s: %s", resp.Status, b)
}

tr := tokenResp{}
if err := json.Unmarshal(b, &tr); err != nil {
return "", err
}

if tr.IDToken != "" {
fmt.Println("Token received!")
return tr.IDToken, nil
}

return "", fmt.Errorf("unexpected error in client flow: %s", tr.Error)
}

// GetIDToken gets an OIDC ID Token from the specified provider using the Client Credentials Grant flow
func (d *DefaultFlowClientCredentials) GetIDToken(p *oidc.Provider, cfg oauth2.Config) (*OIDCIDToken, error) {
idToken, err := d.clientCredentialsFlow(p, cfg.ClientID, cfg.ClientSecret, cfg.RedirectURL)
if err != nil {
return nil, err
}
verifier := p.Verifier(&oidc.Config{ClientID: cfg.ClientID})
parsedIDToken, err := verifier.Verify(context.Background(), idToken)
if err != nil {
return nil, err
}

subj, err := SubjectFromToken(parsedIDToken)
if err != nil {
return nil, err
}

return &OIDCIDToken{
RawString: idToken,
Subject: subj,
}, nil
}
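A sketch of how the new flow might be driven (the issuer URL and client credentials are placeholders, and the provider must actually support the client_credentials grant):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/coreos/go-oidc/v3/oidc"
	"github.com/sigstore/sigstore/pkg/oauthflow"
	"golang.org/x/oauth2"
)

func main() {
	// Placeholder issuer; it must expose /.well-known/openid-configuration
	// with a token_endpoint and accept grant_type=client_credentials.
	issuer := "https://oauth2.example.com"

	provider, err := oidc.NewProvider(context.Background(), issuer)
	if err != nil {
		log.Fatal(err)
	}

	flow := oauthflow.NewClientCredentialsFlow(issuer)
	tok, err := flow.GetIDToken(provider, oauth2.Config{
		ClientID:     "my-client-id",     // placeholder
		ClientSecret: "my-client-secret", // placeholder
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("token subject:", tok.Subject)
}
```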
12 vendor/github.com/sigstore/sigstore/pkg/oauthflow/flow.go (generated, vendored)
@ -19,6 +19,7 @@ import (
"context"
"encoding/json"
"errors"
"log"

"github.com/coreos/go-oidc/v3/oidc"
"github.com/go-jose/go-jose/v3"
@ -46,6 +47,17 @@ type OIDCIDToken struct {
Subject string // Subject is the extracted subject from the raw token
}

// init
func init() {
// set the default HTML page for the DefaultIDTokenGetter
htmlPage, err := soauth.GetInteractiveSuccessHTML(false, 10)
if err != nil {
log.Print("failed to get interactive success html, defaulting to original static page")
} else {
DefaultIDTokenGetter.HTMLPage = htmlPage
}
}

// ConnectorIDOpt requests the value of prov as a the connector_id (either on URL or in form body) on the initial request;
// this is used by Dex
func ConnectorIDOpt(prov string) oauth2.AuthCodeOption {
211 vendor/github.com/sigstore/sigstore/pkg/signature/ed25519ph.go (generated, vendored, new file)
@ -0,0 +1,211 @@
//
// Copyright 2024 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package signature

import (
"crypto"
"crypto/ed25519"
"crypto/rand"
"errors"
"fmt"
"io"

"github.com/sigstore/sigstore/pkg/signature/options"
)

var ed25519phSupportedHashFuncs = []crypto.Hash{
crypto.SHA512,
}

// ED25519phSigner is a signature.Signer that uses the Ed25519 public-key signature system with pre-hashing
type ED25519phSigner struct {
priv ed25519.PrivateKey
}

// LoadED25519phSigner calculates signatures using the specified private key.
func LoadED25519phSigner(priv ed25519.PrivateKey) (*ED25519phSigner, error) {
if priv == nil {
return nil, errors.New("invalid ED25519 private key specified")
}

return &ED25519phSigner{
priv: priv,
}, nil
}

// ToED25519SignerVerifier creates a ED25519SignerVerifier from a ED25519phSignerVerifier
//
// Clients that use ED25519phSignerVerifier should use this method to get a
// SignerVerifier that uses the same ED25519 private key, but with the Pure
// Ed25519 algorithm. This might be necessary to interact with Fulcio, which
// only supports the Pure Ed25519 algorithm.
func (e ED25519phSignerVerifier) ToED25519SignerVerifier() (*ED25519SignerVerifier, error) {
return LoadED25519SignerVerifier(e.priv)
}

// SignMessage signs the provided message. If the message is provided,
// this method will compute the digest according to the hash function specified
// when the ED25519phSigner was created.
//
// This function recognizes the following Options listed in order of preference:
//
// - WithDigest()
//
// All other options are ignored if specified.
func (e ED25519phSigner) SignMessage(message io.Reader, opts ...SignOption) ([]byte, error) {
digest, _, err := ComputeDigestForSigning(message, crypto.SHA512, ed25519phSupportedHashFuncs, opts...)
if err != nil {
return nil, err
}

return e.priv.Sign(nil, digest, crypto.SHA512)
}

// Public returns the public key that can be used to verify signatures created by
// this signer.
func (e ED25519phSigner) Public() crypto.PublicKey {
if e.priv == nil {
return nil
}

return e.priv.Public()
}

// PublicKey returns the public key that can be used to verify signatures created by
// this signer. As this value is held in memory, all options provided in arguments
// to this method are ignored.
func (e ED25519phSigner) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
return e.Public(), nil
}

// Sign computes the signature for the specified message; the first and third arguments to this
// function are ignored as they are not used by the ED25519ph algorithm.
func (e ED25519phSigner) Sign(_ io.Reader, digest []byte, _ crypto.SignerOpts) ([]byte, error) {
return e.SignMessage(nil, options.WithDigest(digest))
}

// ED25519phVerifier is a signature.Verifier that uses the Ed25519 public-key signature system
type ED25519phVerifier struct {
publicKey ed25519.PublicKey
}

// LoadED25519phVerifier returns a Verifier that verifies signatures using the
// specified ED25519 public key.
func LoadED25519phVerifier(pub ed25519.PublicKey) (*ED25519phVerifier, error) {
if pub == nil {
return nil, errors.New("invalid ED25519 public key specified")
}

return &ED25519phVerifier{
publicKey: pub,
}, nil
}

// PublicKey returns the public key that is used to verify signatures by
// this verifier. As this value is held in memory, all options provided in arguments
// to this method are ignored.
func (e *ED25519phVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
return e.publicKey, nil
}

// VerifySignature verifies the signature for the given message. Unless provided
// in an option, the digest of the message will be computed using the hash function specified
// when the ED25519phVerifier was created.
//
// This function returns nil if the verification succeeded, and an error message otherwise.
//
// This function recognizes the following Options listed in order of preference:
//
// - WithDigest()
//
// All other options are ignored if specified.
func (e *ED25519phVerifier) VerifySignature(signature, message io.Reader, opts ...VerifyOption) error {
if signature == nil {
return errors.New("nil signature passed to VerifySignature")
}

digest, _, err := ComputeDigestForVerifying(message, crypto.SHA512, ed25519phSupportedHashFuncs, opts...)
if err != nil {
return err
}

sigBytes, err := io.ReadAll(signature)
if err != nil {
return fmt.Errorf("reading signature: %w", err)
}

if err := ed25519.VerifyWithOptions(e.publicKey, digest, sigBytes, &ed25519.Options{Hash: crypto.SHA512}); err != nil {
return fmt.Errorf("failed to verify signature: %w", err)
}
return nil
}

// ED25519phSignerVerifier is a signature.SignerVerifier that uses the Ed25519 public-key signature system
type ED25519phSignerVerifier struct {
*ED25519phSigner
*ED25519phVerifier
}

// LoadED25519phSignerVerifier creates a combined signer and verifier. This is
// a convenience object that simply wraps an instance of ED25519phSigner and ED25519phVerifier.
func LoadED25519phSignerVerifier(priv ed25519.PrivateKey) (*ED25519phSignerVerifier, error) {
signer, err := LoadED25519phSigner(priv)
if err != nil {
return nil, fmt.Errorf("initializing signer: %w", err)
}
pub, ok := priv.Public().(ed25519.PublicKey)
if !ok {
return nil, fmt.Errorf("given key is not ed25519.PublicKey")
}
verifier, err := LoadED25519phVerifier(pub)
if err != nil {
return nil, fmt.Errorf("initializing verifier: %w", err)
}

return &ED25519phSignerVerifier{
ED25519phSigner: signer,
ED25519phVerifier: verifier,
}, nil
}

// NewDefaultED25519phSignerVerifier creates a combined signer and verifier using ED25519.
// This creates a new ED25519 key using crypto/rand as an entropy source.
func NewDefaultED25519phSignerVerifier() (*ED25519phSignerVerifier, ed25519.PrivateKey, error) {
return NewED25519phSignerVerifier(rand.Reader)
}

// NewED25519phSignerVerifier creates a combined signer and verifier using ED25519.
// This creates a new ED25519 key using the specified entropy source.
func NewED25519phSignerVerifier(rand io.Reader) (*ED25519phSignerVerifier, ed25519.PrivateKey, error) {
_, priv, err := ed25519.GenerateKey(rand)
if err != nil {
return nil, nil, err
}

sv, err := LoadED25519phSignerVerifier(priv)
if err != nil {
return nil, nil, err
}

return sv, priv, nil
}

// PublicKey returns the public key that is used to verify signatures by
// this verifier. As this value is held in memory, all options provided in arguments
// to this method are ignored.
func (e ED25519phSignerVerifier) PublicKey(_ ...PublicKeyOption) (crypto.PublicKey, error) {
return e.publicKey, nil
}
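A round-trip sketch for the new pre-hashed signer/verifier (the message is illustrative; SHA-512 is the only digest these types accept):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/sigstore/sigstore/pkg/signature"
)

func main() {
	// Fresh key plus combined signer/verifier, seeded from crypto/rand.
	sv, _, err := signature.NewDefaultED25519phSignerVerifier()
	if err != nil {
		log.Fatal(err)
	}

	msg := []byte("hello, ed25519ph")

	// SignMessage hashes the message with SHA-512 before signing.
	sig, err := sv.SignMessage(bytes.NewReader(msg))
	if err != nil {
		log.Fatal(err)
	}

	if err := sv.VerifySignature(bytes.NewReader(sig), bytes.NewReader(msg)); err != nil {
		log.Fatal(err)
	}
	fmt.Println("signature verified")
}
```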
8 vendor/github.com/sigstore/sigstore/pkg/signature/options.go (generated, vendored)
@ -18,6 +18,7 @@ package signature
import (
"context"
"crypto"
"crypto/rsa"
"io"

"github.com/sigstore/sigstore/pkg/signature/options"
@ -55,3 +56,10 @@ type VerifyOption interface {
RPCOption
MessageOption
}

// LoadOption specifies options to be used when creating a Signer/Verifier
type LoadOption interface {
ApplyHash(*crypto.Hash)
ApplyED25519ph(*bool)
ApplyRSAPSS(**rsa.PSSOptions)
}
76 vendor/github.com/sigstore/sigstore/pkg/signature/options/loadoptions.go (generated, vendored, new file)
@ -0,0 +1,76 @@
//
// Copyright 2024 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package options

import (
"crypto"
"crypto/rsa"
)

// RequestHash implements the functional option pattern for setting a Hash
// function when loading a signer or verifier
type RequestHash struct {
NoOpOptionImpl
hashFunc crypto.Hash
}

// ApplyHash sets the hash as requested by the functional option
func (r RequestHash) ApplyHash(hash *crypto.Hash) {
*hash = r.hashFunc
}

// WithHash specifies that the given hash function should be used when loading a signer or verifier
func WithHash(hash crypto.Hash) RequestHash {
return RequestHash{hashFunc: hash}
}

// RequestED25519ph implements the functional option pattern for specifying
// ED25519ph (pre-hashed) should be used when loading a signer or verifier and a
// ED25519 key is
type RequestED25519ph struct {
NoOpOptionImpl
useED25519ph bool
}

// ApplyED25519ph sets the ED25519ph flag as requested by the functional option
func (r RequestED25519ph) ApplyED25519ph(useED25519ph *bool) {
*useED25519ph = r.useED25519ph
}

// WithED25519ph specifies that the ED25519ph algorithm should be used when a ED25519 key is used
func WithED25519ph() RequestED25519ph {
return RequestED25519ph{useED25519ph: true}
}

// RequestPSSOptions implements the functional option pattern for specifying RSA
// PSS should be used when loading a signer or verifier and a RSA key is
// detected
type RequestPSSOptions struct {
NoOpOptionImpl
opts *rsa.PSSOptions
}

// ApplyRSAPSS sets the RSAPSS options as requested by the functional option
func (r RequestPSSOptions) ApplyRSAPSS(opts **rsa.PSSOptions) {
*opts = r.opts
}

// WithRSAPSS specifies that the RSAPSS algorithm should be used when a RSA key is used
// Note that the RSA PSSOptions contains an hash algorithm, which will override
// the hash function specified with WithHash.
func WithRSAPSS(opts *rsa.PSSOptions) RequestPSSOptions {
return RequestPSSOptions{opts: opts}
}
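A sketch of how these functional options plug into the new Load*WithOpts entry points below (key sizes and hash choices are illustrative):

```go
package main

import (
	"crypto"
	"crypto/ed25519"
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"github.com/sigstore/sigstore/pkg/signature"
	"github.com/sigstore/sigstore/pkg/signature/options"
)

func main() {
	// Ed25519 key + WithED25519ph() selects the pre-hashed variant.
	_, edPriv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := signature.LoadSignerVerifierWithOpts(edPriv, options.WithED25519ph()); err != nil {
		log.Fatal(err)
	}

	// RSA key + WithRSAPSS() switches from PKCS#1 v1.5 to PSS; per the
	// comment on WithRSAPSS, the PSSOptions hash overrides WithHash.
	rsaPriv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := signature.LoadSignerVerifierWithOpts(rsaPriv,
		options.WithHash(crypto.SHA512),
		options.WithRSAPSS(&rsa.PSSOptions{Hash: crypto.SHA512})); err != nil {
		log.Fatal(err)
	}

	fmt.Println("both signer/verifiers loaded")
}
```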
10 vendor/github.com/sigstore/sigstore/pkg/signature/options/noop.go (generated, vendored)
@ -18,6 +18,7 @@ package options
import (
"context"
"crypto"
"crypto/rsa"
"io"
)

@ -47,3 +48,12 @@ func (NoOpOptionImpl) ApplyKeyVersion(_ *string) {}

// ApplyKeyVersionUsed is a no-op required to fully implement the requisite interfaces
func (NoOpOptionImpl) ApplyKeyVersionUsed(_ **string) {}

// ApplyHash is a no-op required to fully implement the requisite interfaces
func (NoOpOptionImpl) ApplyHash(_ *crypto.Hash) {}

// ApplyED25519ph is a no-op required to fully implement the requisite interfaces
func (NoOpOptionImpl) ApplyED25519ph(_ *bool) {}

// ApplyRSAPSS is a no-op required to fully implement the requisite interfaces
func (NoOpOptionImpl) ApplyRSAPSS(_ **rsa.PSSOptions) {}
36 vendor/github.com/sigstore/sigstore/pkg/signature/signer.go (generated, vendored)
@ -30,6 +30,7 @@ import (
_ "crypto/sha512"

"github.com/sigstore/sigstore/pkg/cryptoutils"
"github.com/sigstore/sigstore/pkg/signature/options"

// these ensure we have the implementations loaded
_ "golang.org/x/crypto/sha3"
@ -59,12 +60,33 @@ func (s SignerOpts) HashFunc() crypto.Hash {
// If privateKey is an RSA key, a RSAPKCS1v15Signer will be returned. If a
// RSAPSSSigner is desired instead, use the LoadRSAPSSSigner() method directly.
func LoadSigner(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (Signer, error) {
return LoadSignerWithOpts(privateKey, options.WithHash(hashFunc))
}

// LoadSignerWithOpts returns a signature.Signer based on the algorithm of the private key
// provided.
func LoadSignerWithOpts(privateKey crypto.PrivateKey, opts ...LoadOption) (Signer, error) {
var rsaPSSOptions *rsa.PSSOptions
var useED25519ph bool
hashFunc := crypto.SHA256
for _, o := range opts {
o.ApplyED25519ph(&useED25519ph)
o.ApplyHash(&hashFunc)
o.ApplyRSAPSS(&rsaPSSOptions)
}

switch pk := privateKey.(type) {
case *rsa.PrivateKey:
if rsaPSSOptions != nil {
return LoadRSAPSSSigner(pk, hashFunc, rsaPSSOptions)
}
return LoadRSAPKCS1v15Signer(pk, hashFunc)
case *ecdsa.PrivateKey:
return LoadECDSASigner(pk, hashFunc)
case ed25519.PrivateKey:
if useED25519ph {
return LoadED25519phSigner(pk)
}
return LoadED25519Signer(pk)
}
return nil, errors.New("unsupported public key type")
@ -87,3 +109,17 @@ func LoadSignerFromPEMFile(path string, hashFunc crypto.Hash, pf cryptoutils.Pas
}
return LoadSigner(priv, hashFunc)
}

// LoadSignerFromPEMFileWithOpts returns a signature.Signer based on the algorithm of the private key
// in the file. The Signer will use the hash function specified in the options when computing digests.
func LoadSignerFromPEMFileWithOpts(path string, pf cryptoutils.PassFunc, opts ...LoadOption) (Signer, error) {
fileBytes, err := os.ReadFile(filepath.Clean(path))
if err != nil {
return nil, err
}
priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf)
if err != nil {
return nil, err
}
return LoadSignerWithOpts(priv, opts...)
}
36 vendor/github.com/sigstore/sigstore/pkg/signature/signerverifier.go (generated, vendored)
@ -25,6 +25,7 @@ import (
"path/filepath"

"github.com/sigstore/sigstore/pkg/cryptoutils"
"github.com/sigstore/sigstore/pkg/signature/options"
)

// SignerVerifier creates and verifies digital signatures over a message using a specified key pair
@ -39,12 +40,33 @@ type SignerVerifier interface {
// If privateKey is an RSA key, a RSAPKCS1v15SignerVerifier will be returned. If a
// RSAPSSSignerVerifier is desired instead, use the LoadRSAPSSSignerVerifier() method directly.
func LoadSignerVerifier(privateKey crypto.PrivateKey, hashFunc crypto.Hash) (SignerVerifier, error) {
return LoadSignerVerifierWithOpts(privateKey, options.WithHash(hashFunc))
}

// LoadSignerVerifierWithOpts returns a signature.SignerVerifier based on the
// algorithm of the private key provided and the user's choice.
func LoadSignerVerifierWithOpts(privateKey crypto.PrivateKey, opts ...LoadOption) (SignerVerifier, error) {
var rsaPSSOptions *rsa.PSSOptions
var useED25519ph bool
hashFunc := crypto.SHA256
for _, o := range opts {
o.ApplyED25519ph(&useED25519ph)
o.ApplyHash(&hashFunc)
o.ApplyRSAPSS(&rsaPSSOptions)
}

switch pk := privateKey.(type) {
case *rsa.PrivateKey:
if rsaPSSOptions != nil {
return LoadRSAPSSSignerVerifier(pk, hashFunc, rsaPSSOptions)
}
return LoadRSAPKCS1v15SignerVerifier(pk, hashFunc)
case *ecdsa.PrivateKey:
return LoadECDSASignerVerifier(pk, hashFunc)
case ed25519.PrivateKey:
if useED25519ph {
return LoadED25519phSignerVerifier(pk)
}
return LoadED25519SignerVerifier(pk)
}
return nil, errors.New("unsupported public key type")
@ -67,3 +89,17 @@ func LoadSignerVerifierFromPEMFile(path string, hashFunc crypto.Hash, pf cryptou
}
return LoadSignerVerifier(priv, hashFunc)
}

// LoadSignerVerifierFromPEMFileWithOpts returns a signature.SignerVerifier based on the algorithm of the private key
// in the file. The SignerVerifier will use the hash function specified in the options when computing digests.
func LoadSignerVerifierFromPEMFileWithOpts(path string, pf cryptoutils.PassFunc, opts ...LoadOption) (SignerVerifier, error) {
fileBytes, err := os.ReadFile(filepath.Clean(path))
if err != nil {
return nil, err
}
priv, err := cryptoutils.UnmarshalPEMToPrivateKey(fileBytes, pf)
if err != nil {
return nil, err
}
return LoadSignerVerifierWithOpts(priv, opts...)
}
38 vendor/github.com/sigstore/sigstore/pkg/signature/verifier.go (generated, vendored)
@ -26,6 +26,7 @@ import (
"path/filepath"

"github.com/sigstore/sigstore/pkg/cryptoutils"
"github.com/sigstore/sigstore/pkg/signature/options"
)

// Verifier verifies the digital signature using a specified public key
@ -40,12 +41,33 @@ type Verifier interface {
// If publicKey is an RSA key, a RSAPKCS1v15Verifier will be returned. If a
// RSAPSSVerifier is desired instead, use the LoadRSAPSSVerifier() method directly.
func LoadVerifier(publicKey crypto.PublicKey, hashFunc crypto.Hash) (Verifier, error) {
return LoadVerifierWithOpts(publicKey, options.WithHash(hashFunc))
}

// LoadVerifierWithOpts returns a signature.Verifier based on the algorithm of the public key
// provided that will use the hash function specified when computing digests.
func LoadVerifierWithOpts(publicKey crypto.PublicKey, opts ...LoadOption) (Verifier, error) {
var rsaPSSOptions *rsa.PSSOptions
var useED25519ph bool
hashFunc := crypto.SHA256
for _, o := range opts {
o.ApplyED25519ph(&useED25519ph)
o.ApplyHash(&hashFunc)
o.ApplyRSAPSS(&rsaPSSOptions)
}

switch pk := publicKey.(type) {
case *rsa.PublicKey:
if rsaPSSOptions != nil {
return LoadRSAPSSVerifier(pk, hashFunc, rsaPSSOptions)
}
return LoadRSAPKCS1v15Verifier(pk, hashFunc)
case *ecdsa.PublicKey:
return LoadECDSAVerifier(pk, hashFunc)
case ed25519.PublicKey:
if useED25519ph {
return LoadED25519phVerifier(pk)
}
return LoadED25519Verifier(pk)
}
return nil, errors.New("unsupported public key type")
@ -98,3 +120,19 @@ func LoadVerifierFromPEMFile(path string, hashFunc crypto.Hash) (Verifier, error

return LoadVerifier(pubKey, hashFunc)
}

// LoadVerifierFromPEMFileWithOpts returns a signature.Verifier based on the contents of a
// file located at path. The Verifier wil use the hash function specified in the options when computing digests.
func LoadVerifierFromPEMFileWithOpts(path string, opts ...LoadOption) (Verifier, error) {
fileBytes, err := os.ReadFile(filepath.Clean(path))
if err != nil {
return nil, err
}

pubKey, err := cryptoutils.UnmarshalPEMToPublicKey(fileBytes)
if err != nil {
return nil, err
}

return LoadVerifierWithOpts(pubKey, opts...)
}
28 vendor/github.com/stretchr/testify/assert/assertion_compare.go (generated, vendored)
@ -28,6 +28,8 @@ var (
uint32Type = reflect.TypeOf(uint32(1))
uint64Type = reflect.TypeOf(uint64(1))

uintptrType = reflect.TypeOf(uintptr(1))

float32Type = reflect.TypeOf(float32(1))
float64Type = reflect.TypeOf(float64(1))

@ -308,11 +310,11 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
case reflect.Struct:
{
// All structs enter here. We're not interested in most types.
if !canConvert(obj1Value, timeType) {
if !obj1Value.CanConvert(timeType) {
break
}

// time.Time can compared!
// time.Time can be compared!
timeObj1, ok := obj1.(time.Time)
if !ok {
timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
@ -328,7 +330,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
case reflect.Slice:
{
// We only care about the []byte type.
if !canConvert(obj1Value, bytesType) {
if !obj1Value.CanConvert(bytesType) {
break
}

@ -345,6 +347,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {

return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
}
case reflect.Uintptr:
{
uintptrObj1, ok := obj1.(uintptr)
if !ok {
uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr)
}
uintptrObj2, ok := obj2.(uintptr)
if !ok {
uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr)
}
if uintptrObj1 > uintptrObj2 {
return compareGreater, true
}
if uintptrObj1 == uintptrObj2 {
return compareEqual, true
}
if uintptrObj1 < uintptrObj2 {
return compareLess, true
}
}
}

return compareEqual, false
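With the new reflect.Uintptr branch, testify's ordered assertions accept uintptr operands. A minimal test sketch (the test name is hypothetical; place it in a _test.go file):

```go
package assert_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestUintptrOrdering(t *testing.T) {
	// Both calls route through compare()'s reflect.Uintptr case above.
	assert.Greater(t, uintptr(2), uintptr(1))
	assert.LessOrEqual(t, uintptr(1), uintptr(1))
}
```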
16 vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go (generated, vendored)
@ -1,16 +0,0 @@
//go:build go1.17
// +build go1.17

// TODO: once support for Go 1.16 is dropped, this file can be
// merged/removed with assertion_compare_go1.17_test.go and
// assertion_compare_legacy.go

package assert

import "reflect"

// Wrapper around reflect.Value.CanConvert, for compatibility
// reasons.
func canConvert(value reflect.Value, to reflect.Type) bool {
return value.CanConvert(to)
}
16 vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go (generated, vendored)
@ -1,16 +0,0 @@
//go:build !go1.17
// +build !go1.17

// TODO: once support for Go 1.16 is dropped, this file can be
// merged/removed with assertion_compare_go1.17_test.go and
// assertion_compare_can_convert.go

package assert

import "reflect"

// Older versions of Go does not have the reflect.Value.CanConvert
// method.
func canConvert(value reflect.Value, to reflect.Type) bool {
return false
}
32 vendor/github.com/stretchr/testify/assert/assertion_format.go (generated, vendored)
@ -1,7 +1,4 @@
/*
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
*/
// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.

package assert

@ -107,7 +104,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{},
return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}

// EqualValuesf asserts that two objects are equal or convertable to the same types
// EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal.
//
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
@ -616,6 +613,16 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf
return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
}

// NotImplementsf asserts that an object does not implement the specified interface.
//
// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
}

// NotNilf asserts that the specified object is not nil.
//
// assert.NotNilf(t, err, "error message %s", "formatted")
@ -660,10 +667,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string,
return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
}

// NotSubsetf asserts that the specified list(array, slice...) contains not all
// elements given in the specified subset(array, slice...).
// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
// contain all elements given in the specified subset list(array, slice...) or
// map.
//
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@ -747,10 +756,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg
return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
}

// Subsetf asserts that the specified list(array, slice...) contains all
// elements given in the specified subset(array, slice...).
// Subsetf asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset list(array, slice...) or map.
//
// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
|
||||
|
59
vendor/github.com/stretchr/testify/assert/assertion_forward.go
generated
vendored
59
vendor/github.com/stretchr/testify/assert/assertion_forward.go
generated
vendored
@ -1,7 +1,4 @@
|
||||
/*
|
||||
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
|
||||
* THIS FILE MUST NOT BE EDITED BY HAND
|
||||
*/
|
||||
// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
|
||||
|
||||
package assert
|
||||
|
||||
@ -189,7 +186,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface
|
||||
return EqualExportedValuesf(a.t, expected, actual, msg, args...)
|
||||
}
|
||||
|
||||
// EqualValues asserts that two objects are equal or convertable to the same types
|
||||
// EqualValues asserts that two objects are equal or convertible to the same types
|
||||
// and equal.
|
||||
//
|
||||
// a.EqualValues(uint32(123), int32(123))
|
||||
@ -200,7 +197,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
|
||||
return EqualValues(a.t, expected, actual, msgAndArgs...)
|
||||
}
|
||||
|
||||
// EqualValuesf asserts that two objects are equal or convertable to the same types
|
||||
// EqualValuesf asserts that two objects are equal or convertible to the same types
|
||||
// and equal.
|
||||
//
|
||||
// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
|
||||
@@ -1221,6 +1218,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in
 	return NotErrorIsf(a.t, err, target, msg, args...)
 }
 
+// NotImplements asserts that an object does not implement the specified interface.
+//
+// a.NotImplements((*MyInterface)(nil), new(MyObject))
+func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotImplements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// NotImplementsf asserts that an object does not implement the specified interface.
+//
+// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotImplementsf(a.t, interfaceObject, object, msg, args...)
+}
+
 // NotNil asserts that the specified object is not nil.
 //
 // a.NotNil(err)
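NotImplements and NotImplementsf are new assertions introduced by this testify bump (v1.8.4 to v1.9.0 in go.mod). The first argument is a nil pointer to the interface type; the second is the value under test. An illustrative sketch follows; the Closer interface and plainStruct type are invented stand-ins for the MyInterface/MyObject placeholders in the doc comments:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Closer is a hypothetical interface for the example.
type Closer interface{ Close() error }

// plainStruct deliberately has no Close method.
type plainStruct struct{}

func TestNotImplementsSketch(t *testing.T) {
	a := assert.New(t)

	// Passes: plainStruct does not implement Closer.
	a.NotImplements((*Closer)(nil), plainStruct{})

	// Formatted variant, mirroring NotImplementsf above.
	a.NotImplementsf((*Closer)(nil), plainStruct{}, "unexpected %T", plainStruct{})
}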
@@ -1309,10 +1326,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri
 	return NotSamef(a.t, expected, actual, msg, args...)
 }
 
-// NotSubset asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubset asserts that the specified list(array, slice...) or map does NOT
+// contain all elements given in the specified subset list(array, slice...) or
+// map.
 //
-// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+// a.NotSubset([1, 3, 4], [1, 2])
+// a.NotSubset({"x": 1, "y": 2}, {"z": 3})
 func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1320,10 +1339,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs
 	return NotSubset(a.t, list, subset, msgAndArgs...)
 }
 
-// NotSubsetf asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubsetf asserts that the specified list(array, slice...) or map does NOT
+// contain all elements given in the specified subset list(array, slice...) or
+// map.
 //
-// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
+// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
 func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
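As the rewritten comments above note, the map form of NotSubset/NotSubsetf compares key/value pairs, not just keys. A small sketch of what passes under that rule (test name and maps invented for this note):

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNotSubsetSketch(t *testing.T) {
	a := assert.New(t)

	// Passes: the key "z" is absent from the larger map.
	a.NotSubset(map[string]int{"x": 1, "y": 2}, map[string]int{"z": 3})

	// Also passes: the key matches but the value differs.
	a.NotSubset(map[string]int{"x": 1}, map[string]int{"x": 2})
}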
@@ -1483,10 +1504,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string,
 	return Samef(a.t, expected, actual, msg, args...)
 }
 
-// Subset asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subset asserts that the specified list(array, slice...) or map contains all
+// elements given in the specified subset list(array, slice...) or map.
 //
-// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+// a.Subset([1, 2, 3], [1, 2])
+// a.Subset({"x": 1, "y": 2}, {"x": 1})
 func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1494,10 +1516,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...
 	return Subset(a.t, list, subset, msgAndArgs...)
 }
 
-// Subsetf asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subsetf asserts that the specified list(array, slice...) or map contains all
+// elements given in the specified subset list(array, slice...) or map.
 //
-// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
+// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
 func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
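Every method in assertion_forward.go, including the Subset/Subsetf pair above, simply forwards to the package-level function of the same name, binding the TestingT captured by assert.New. A hedged sketch of the two equivalent call styles (test name invented for this note):

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestForwarderSketch(t *testing.T) {
	// assert.New stores t so later calls can omit it.
	a := assert.New(t)

	// The next two assertions are equivalent; the method delegates to
	// the package-level function with the stored t.
	a.Subset([]int{1, 2, 3}, []int{1, 2})
	assert.Subset(t, []int{1, 2, 3}, []int{1, 2})
}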
Some files were not shown because too many files have changed in this diff