Mirror of https://github.com/containers/skopeo.git (synced 2025-05-07 23:46:46 +00:00)
fix(deps): update module github.com/containers/storage to v1.52.0

... and c/image/v5 to main

Signed-off-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com>
Signed-off-by: Miloslav Trmač <mitr@redhat.com>
parent 6baa928c1b
commit 58ff9fdb27
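Renovate bumps like this one are mechanical; a minimal sketch of the equivalent manual steps, assuming a checkout of the parent commit (the bot's exact invocation isn't recorded on this page):

```sh
# pin the two modules named in the commit message
go get github.com/containers/storage@v1.52.0
go get github.com/containers/image/v5@main
# recompute go.mod/go.sum and refresh the vendored copies;
# this is what produces every vendor/ change in this commit
go mod tidy
go mod vendor
```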
Changed paths (as listed, truncated):
go.mod
go.sum
vendor/github.com/
    Microsoft/hcsshim/
        .golangci.yml
        README.md
        computestorage/
        container.go
        errors.go
        internal/
    containers/
        image/v5/
            copy/
            internal/
                blobinfocache/
                imagedestination/
                imagesource/
                manifest/
                pkg/platform/
                private/
            manifest/
            oci/archive/
            pkg/blobinfocache/
            signature/
            storage/
            version/
        storage/
            .cirrus.yml
            VERSION
            storage.conf
            storage.conf-freebsd
            store.go
            drivers/
            layers.go
            pkg/
                archive/
                chunked/
                config/
                homedir/
                unshare/
            types/
            utils.go
    coreos/go-oidc/v3/oidc/
    docker/
        distribution/reference/
            helpers_deprecated.go
            normalize_deprecated.go
            reference_deprecated.go
            regexp_deprecated.go
            sort_deprecated.go
        docker-credential-helpers/
        docker/
go.mod (60 changed lines)
@@ -4,9 +4,9 @@ go 1.19

require (
	github.com/containers/common v0.57.2
	github.com/containers/image/v5 v5.29.1
	github.com/containers/image/v5 v5.29.2-0.20240119225553-e6e119441091
	github.com/containers/ocicrypt v1.1.9
	github.com/containers/storage v1.51.0
	github.com/containers/storage v1.52.0
	github.com/docker/distribution v2.8.3+incompatible
	github.com/opencontainers/go-digest v1.0.0
	github.com/opencontainers/image-spec v1.1.0-rc6
@@ -26,44 +26,45 @@ require (
	github.com/BurntSushi/toml v1.3.2 // indirect
	github.com/Masterminds/semver/v3 v3.2.1
	github.com/Microsoft/go-winio v0.6.1 // indirect
	github.com/Microsoft/hcsshim v0.12.0-rc.1 // indirect
	github.com/Microsoft/hcsshim v0.12.0-rc.2 // indirect
	github.com/VividCortex/ewma v1.2.0 // indirect
	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
	github.com/containerd/cgroups/v3 v3.0.2 // indirect
	github.com/containerd/containerd v1.7.9 // indirect
	github.com/containerd/containerd v1.7.12 // indirect
	github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
	github.com/coreos/go-oidc/v3 v3.7.0 // indirect
	github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect
	github.com/coreos/go-oidc/v3 v3.9.0 // indirect
	github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f // indirect
	github.com/cyphar/filepath-securejoin v0.2.4 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/distribution/reference v0.5.0 // indirect
	github.com/docker/docker v24.0.7+incompatible // indirect
	github.com/docker/docker-credential-helpers v0.8.0 // indirect
	github.com/docker/go-connections v0.4.0 // indirect
	github.com/docker/docker v25.0.0+incompatible // indirect
	github.com/docker/docker-credential-helpers v0.8.1 // indirect
	github.com/docker/go-connections v0.5.0 // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
	github.com/felixge/httpsnoop v1.0.4 // indirect
	github.com/go-jose/go-jose/v3 v3.0.1 // indirect
	github.com/go-logr/logr v1.3.0 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/go-openapi/analysis v0.21.4 // indirect
	github.com/go-openapi/errors v0.20.4 // indirect
	github.com/go-openapi/errors v0.21.0 // indirect
	github.com/go-openapi/jsonpointer v0.19.6 // indirect
	github.com/go-openapi/jsonreference v0.20.2 // indirect
	github.com/go-openapi/loads v0.21.2 // indirect
	github.com/go-openapi/runtime v0.26.0 // indirect
	github.com/go-openapi/spec v0.20.9 // indirect
	github.com/go-openapi/strfmt v0.21.7 // indirect
	github.com/go-openapi/swag v0.22.4 // indirect
	github.com/go-openapi/strfmt v0.22.0 // indirect
	github.com/go-openapi/swag v0.22.7 // indirect
	github.com/go-openapi/validate v0.22.1 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
	github.com/golang/protobuf v1.5.3 // indirect
	github.com/google/go-containerregistry v0.16.1 // indirect
	github.com/google/go-containerregistry v0.17.0 // indirect
	github.com/google/go-intervals v0.0.2 // indirect
	github.com/google/uuid v1.3.1 // indirect
	github.com/gorilla/mux v1.8.0 // indirect
	github.com/google/uuid v1.5.0 // indirect
	github.com/gorilla/mux v1.8.1 // indirect
	github.com/hashicorp/errwrap v1.1.0 // indirect
	github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
	github.com/hashicorp/go-multierror v1.1.1 // indirect
@@ -71,13 +72,13 @@ require (
	github.com/inconshreveable/mousetrap v1.1.0 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/klauspost/compress v1.17.3 // indirect
	github.com/klauspost/compress v1.17.4 // indirect
	github.com/klauspost/pgzip v1.2.6 // indirect
	github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect
	github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e // indirect
	github.com/mailru/easyjson v0.7.7 // indirect
	github.com/mattn/go-runewidth v0.0.15 // indirect
	github.com/mattn/go-shellwords v1.0.12 // indirect
	github.com/mattn/go-sqlite3 v1.14.18 // indirect
	github.com/mattn/go-sqlite3 v1.14.19 // indirect
	github.com/miekg/pkcs11 v1.1.1 // indirect
	github.com/mistifyio/go-zfs/v3 v3.0.1 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
@@ -85,7 +86,7 @@ require (
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/oklog/ulid v1.3.1 // indirect
	github.com/opencontainers/runc v1.1.10 // indirect
	github.com/opencontainers/runc v1.1.11 // indirect
	github.com/opencontainers/runtime-spec v1.1.0 // indirect
	github.com/opencontainers/selinux v1.11.0 // indirect
	github.com/opentracing/opentracing-go v1.2.0 // indirect
@@ -95,39 +96,40 @@ require (
	github.com/proglottis/gpgme v0.1.3 // indirect
	github.com/rivo/uniseg v0.4.4 // indirect
	github.com/russross/blackfriday v2.0.0+incompatible // indirect
	github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
	github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
	github.com/segmentio/ksuid v1.0.4 // indirect
	github.com/sigstore/fulcio v1.4.3 // indirect
	github.com/sigstore/rekor v1.2.2 // indirect
	github.com/sigstore/sigstore v1.7.5 // indirect
	github.com/sigstore/sigstore v1.8.1 // indirect
	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
	github.com/sylabs/sif/v2 v2.15.0 // indirect
	github.com/sylabs/sif/v2 v2.15.1 // indirect
	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
	github.com/ulikunitz/xz v0.5.11 // indirect
	github.com/vbatts/tar-split v0.11.5 // indirect
	github.com/vbauerster/mpb/v8 v8.6.2 // indirect
	github.com/vbauerster/mpb/v8 v8.7.2 // indirect
	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
	go.mongodb.org/mongo-driver v1.11.3 // indirect
	go.mongodb.org/mongo-driver v1.13.1 // indirect
	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
	go.opencensus.io v0.24.0 // indirect
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect
	go.opentelemetry.io/otel v1.19.0 // indirect
	go.opentelemetry.io/otel/metric v1.19.0 // indirect
	go.opentelemetry.io/otel/trace v1.19.0 // indirect
	golang.org/x/crypto v0.16.0 // indirect
	golang.org/x/crypto v0.18.0 // indirect
	golang.org/x/mod v0.14.0 // indirect
	golang.org/x/net v0.19.0 // indirect
	golang.org/x/oauth2 v0.14.0 // indirect
	golang.org/x/sync v0.5.0 // indirect
	golang.org/x/net v0.20.0 // indirect
	golang.org/x/oauth2 v0.16.0 // indirect
	golang.org/x/sync v0.6.0 // indirect
	golang.org/x/sys v0.16.0 // indirect
	golang.org/x/text v0.14.0 // indirect
	golang.org/x/tools v0.16.0 // indirect
	google.golang.org/appengine v1.6.8 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect
	google.golang.org/grpc v1.58.3 // indirect
	google.golang.org/grpc v1.59.0 // indirect
	google.golang.org/protobuf v1.31.0 // indirect
	gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
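Each bumped module shows up twice per version in the go.sum diff that follows: an `h1:` hash over the module's extracted file tree and a `/go.mod` hash over just its manifest. A quick re-check of the recorded hashes, assuming a full checkout of this commit:

```sh
# re-fetch the build list; this fails if a download disagrees with go.sum
go mod download
# re-verify the local module cache against the recorded hashes
go mod verify
```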
go.sum (155 changed lines)
@@ -10,8 +10,8 @@ github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0
github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/Microsoft/hcsshim v0.12.0-rc.1 h1:Hy+xzYujv7urO5wrgcG58SPMOXNLrj4WCJbySs2XX/A=
github.com/Microsoft/hcsshim v0.12.0-rc.1/go.mod h1:Y1a1S0QlYp1mBpyvGiuEdOfZqnao+0uX5AWHXQ5NhZU=
github.com/Microsoft/hcsshim v0.12.0-rc.2 h1:gfKebjq3Mq17Ys+4cjE8vc2h6tZVeqCGb9a7vBVqpAk=
github.com/Microsoft/hcsshim v0.12.0-rc.2/go.mod h1:G2TZhBED5frlh/hsuxV5CDh/ylkSFknPAMPpQg9owQw=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow=
@@ -22,32 +22,34 @@ github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:W
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
github.com/containerd/containerd v1.7.9 h1:KOhK01szQbM80YfW1H6RZKh85PHGqY/9OcEZ35Je8sc=
github.com/containerd/containerd v1.7.9/go.mod h1:0/W44LWEYfSHoxBtsHIiNU/duEkgpMokemafHVCpq9Y=
github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0=
github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
github.com/containers/common v0.57.2 h1:50Zp9VuXt2u5uNTKEc4aU2+PfWzFNAulD9JX43VihyI=
github.com/containers/common v0.57.2/go.mod h1:ZG9ab1bEssX98ZBclWFIyzx4+MyUN8dXj1oSEugp7N0=
github.com/containers/image/v5 v5.29.1 h1:9COTXQpl3FgrW/jw/roLAWlW4TN9ly7/bCAKY76wYl8=
github.com/containers/image/v5 v5.29.1/go.mod h1:kQ7qcDsps424ZAz24thD+x7+dJw1vgur3A9tTDsj97E=
github.com/containers/image/v5 v5.29.2-0.20240119225553-e6e119441091 h1:sAu48Kd2gfw6QxfZb/58tbMNj0bfClSj9CojnI1XfgA=
github.com/containers/image/v5 v5.29.2-0.20240119225553-e6e119441091/go.mod h1:OnS8tVWtDcn5AIB6qkS7xjM5HO1hNw2NzmHe9H35CCY=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
github.com/containers/ocicrypt v1.1.9 h1:2Csfba4jse85Raxk5HIyEk8OwZNjRvfkhEGijOjIdEM=
github.com/containers/ocicrypt v1.1.9/go.mod h1:dTKx1918d8TDkxXvarscpNVY+lyPakPNFN4jwA9GBys=
github.com/containers/storage v1.51.0 h1:AowbcpiWXzAjHosKz7MKvPEqpyX+ryZA/ZurytRrFNA=
github.com/containers/storage v1.51.0/go.mod h1:ybl8a3j1PPtpyaEi/5A6TOFs+5TrEyObeKJzVtkUlfc=
github.com/coreos/go-oidc/v3 v3.7.0 h1:FTdj0uexT4diYIPlF4yoFVI5MRO1r5+SEcIpEw9vC0o=
github.com/coreos/go-oidc/v3 v3.7.0/go.mod h1:yQzSCqBnK3e6Fs5l+f5i0F8Kwf0zpH9bPEsbY00KanM=
github.com/containers/storage v1.52.0 h1:8QFFeJg2cQFN0TyJguxHrSz3bl7XtMRnfXrTsvLVkuY=
github.com/containers/storage v1.52.0/go.mod h1:PE+L330tisEjQrAVkfAlW8ECvqzc/jusrxJzu9TEi2w=
github.com/coreos/go-oidc/v3 v3.9.0 h1:0J/ogVOd4y8P0f0xUh8l9t07xRP/d8tccvjHl2dcsSo=
github.com/coreos/go-oidc/v3 v3.9.0/go.mod h1:rTKz2PYwftcrtoCzV5g5kvfJoWcm0Mk8AF8y1iAQro4=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc=
github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f h1:eHnXnuK47UlSTOQexbzxAZfekVz6i+LKRdj1CU5DPaM=
github.com/cyberphone/json-canonicalization v0.0.0-20231217050601-ba74d44ecf5f/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -55,15 +57,15 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/cli v24.0.7+incompatible h1:wa/nIwYFW7BVTGa7SWPVyyXU9lgORqUb1xfI36MSkFg=
github.com/docker/cli v25.0.0+incompatible h1:zaimaQdnX7fYWFqzN88exE9LDEvRslexpFowZBX6GoQ=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v24.0.7+incompatible h1:Wo6l37AuwP3JaMnZa226lzVXGA3F9Ig1seQen0cKYlM=
github.com/docker/docker v24.0.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/docker v25.0.0+incompatible h1:g9b6wZTblhMgzOT2tspESstfw6ySZ9kdm94BLDKaZac=
github.com/docker/docker v25.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo=
github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
@@ -74,9 +76,9 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
github.com/facebookgo/limitgroup v0.0.0-20150612190941-6abd8d71ec01 h1:IeaD1VDVBPlx3viJT9Md8if8IxxJnO+x0JCGb054heg=
github.com/facebookgo/muster v0.0.0-20150708232844-fd3d7953fd52 h1:a4DFiKFJiDRGFD1qIcqGLX/WlUMD9dyLSLDt+9QZgt8=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/go-jose/go-jose/v3 v3.0.1 h1:pWmKFVtt+Jl0vBZTIpz/eAKwsm6LkIxDVVbFHKkchhA=
github.com/go-jose/go-jose/v3 v3.0.1/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -90,8 +92,8 @@ github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9Qy
github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M=
github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY=
github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
@@ -112,17 +114,17 @@ github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6
github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg=
github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k=
github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew=
github.com/go-openapi/strfmt v0.22.0 h1:Ew9PnEYc246TwrEspvBdDHS4BVKXy/AOVsfqGDgAcaI=
github.com/go-openapi/strfmt v0.22.0/go.mod h1:HzJ9kokGIju3/K6ap8jL+OlGAbjpSv27135Yr9OivU4=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.22.7 h1:JWrc1uc/P9cSomxfnsFSVWoE1FW6bNbrVPmpQYpCcR8=
github.com/go-openapi/swag v0.22.7/go.mod h1:Gl91UqO+btAM0plGGxHqJcQZ1ZTy6jbmridBTsDy8A0=
github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg=
github.com/go-rod/rod v0.114.4 h1:FpkNFukjCuZLwnoLs+S9aCL95o/EMec6M+41UmvQay8=
github.com/go-rod/rod v0.114.5 h1:1x6oqnslwFVuXJbJifgxspJUd3O4ntaGhRLHt+4Er9c=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg=
@@ -180,8 +182,8 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ=
github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
github.com/google/go-containerregistry v0.17.0 h1:5p+zYs/R4VGHkhyvgWurWrpJ2hW4Vv9fQI+GzdcwXLk=
github.com/google/go-containerregistry v0.17.0/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -189,27 +191,26 @@ github.com/google/pprof v0.0.0-20230323073829-e72429f035bd h1:r8yyd+DJDmsUhGrRBx
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU=
github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v1.2.0 h1:La19f8d7WIlm4ogzNHB0JGqs5AUDAZ2UfCY4sJXcJdM=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.7.5 h1:bJj+Pj19UZMIweq/iie+1u5YCdGrnxCT9yvm0e+Nd5M=
github.com/hashicorp/go-retryablehttp v0.7.5/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/honeycombio/beeline-go v1.10.0 h1:cUDe555oqvw8oD76BQJ8alk7FP0JZ/M/zXpNvOEDLDc=
github.com/honeycombio/libhoney-go v1.16.0 h1:kPpqoz6vbOzgp7jC6SR7SkNj7rua7rgxvznI6M3KdHc=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4=
github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs=
github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
@@ -221,8 +222,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.17.3 h1:qkRjuerhUU1EmXLYGkSH6EZL+vPSxIrYjLNAK4slzwA=
github.com/klauspost/compress v1.17.3/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -235,8 +236,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 h1:unJdfS94Y3k85TKy+mvKzjW5R9rIC+Lv4KGbE7uNu0I=
github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6/go.mod h1:PUgW5vI9ANEaV6qv9a6EKu8gAySgwf0xrzG9xIB/CK0=
github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e h1:RLTpX495BXToqxpM90Ws4hXEo4Wfh81jr9DX1n/4WOo=
github.com/letsencrypt/boulder v0.0.0-20230907030200-6d76a0f91e1e/go.mod h1:EAuqr9VFWxBi9nD5jc/EA2MT1RFty9288TF6zdtYoCU=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -244,12 +245,14 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-sqlite3 v1.14.18 h1:JL0eqdCOq6DJVNPSvArO/bIV9/P7fbGrV00LZHc+5aI=
github.com/mattn/go-sqlite3 v1.14.18/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mattn/go-sqlite3 v1.14.19 h1:fhGleo2h1p8tVChob4I9HpmVFIAkKGpiukdrgQbWfGI=
github.com/mattn/go-sqlite3 v1.14.19/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
@@ -280,8 +283,8 @@ github.com/opencontainers/image-spec v1.1.0-rc6 h1:XDqvyKsJEbRtATzkgItUqBA7QHk58
github.com/opencontainers/image-spec v1.1.0-rc6/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/opencontainers/image-tools v1.0.0-rc3 h1:ZR837lBIxq6mmwEqfYrbLMuf75eBSHhccVHy6lsBeM4=
github.com/opencontainers/image-tools v1.0.0-rc3/go.mod h1:A9btVpZLzttF4iFaKNychhPyrhfOjJ1OF5KrA8GcLj4=
github.com/opencontainers/runc v1.1.10 h1:EaL5WeO9lv9wmS6SASjszOeQdSctvpbu0DdBQBizE40=
github.com/opencontainers/runc v1.1.10/go.mod h1:+/R6+KmDlh+hOO8NkjmgkG9Qzvypzk0yXxAPYYR65+M=
github.com/opencontainers/runc v1.1.11 h1:9LjxyVlE0BPMRP2wuQDRlHV4941Jp9rc3F0+YKimopA=
github.com/opencontainers/runc v1.1.11/go.mod h1:S+lQwSfncpBha7XTy/5lBwWgm5+y5Ma/O44Ekby9FK8=
github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg=
github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
@@ -316,18 +319,18 @@ github.com/russross/blackfriday v2.0.0+incompatible h1:cBXrhZNUf9C+La9/YpS+UHpUT
github.com/russross/blackfriday v2.0.0+incompatible/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg=
github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI=
github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=
github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU=
github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c=
github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE=
github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/sigstore/fulcio v1.4.3 h1:9JcUCZjjVhRF9fmhVuz6i1RyhCc/EGCD7MOl+iqCJLQ=
github.com/sigstore/fulcio v1.4.3/go.mod h1:BQPWo7cfxmJwgaHlphUHUpFkp5+YxeJes82oo39m5og=
github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY=
github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg=
github.com/sigstore/sigstore v1.7.5 h1:ij55dBhLwjICmLTBJZm7SqoQLdsu/oowDanACcJNs48=
github.com/sigstore/sigstore v1.7.5/go.mod h1:9OCmYWhzuq/G4e1cy9m297tuMRJ1LExyrXY3ZC3Zt/s=
github.com/sigstore/sigstore v1.8.1 h1:mAVposMb14oplk2h/bayPmIVdzbq2IhCgy4g6R0ZSjo=
github.com/sigstore/sigstore v1.8.1/go.mod h1:02SL1158BSj15bZyOFz7m+/nJzLZfFd9A8ab3Kz7w/E=
github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
@@ -356,14 +359,13 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/sylabs/sif/v2 v2.15.0 h1:Nv0tzksFnoQiQ2eUwpAis9nVqEu4c3RcNSxX8P3Cecw=
github.com/sylabs/sif/v2 v2.15.0/go.mod h1:X1H7eaPz6BAxA84POMESXoXfTqgAnLQkujyF/CQFWTc=
github.com/sylabs/sif/v2 v2.15.1 h1:75BcunPOY11fVhe02/WHuNLTfDd3OHH0ex0MuuNMYX0=
github.com/sylabs/sif/v2 v2.15.1/go.mod h1:YiwCUdZOhiohnPbyxuxvCZa+03HwAaiC+vfAKZPR8nQ=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia/v2 v2.3.1 h1:6rQp39lgIYZ+MHmdEq4xzuk1t7OdC35z/xm0BGhTkes=
github.com/tchap/go-patricia/v2 v2.3.1/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0=
github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs=
github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
@@ -371,15 +373,15 @@ github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8=
github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
github.com/vbauerster/mpb/v8 v8.6.2 h1:9EhnJGQRtvgDVCychJgR96EDCOqgg2NsMuk5JUcX4DA=
github.com/vbauerster/mpb/v8 v8.6.2/go.mod h1:oVJ7T+dib99kZ/VBjoBaC8aPXiSAihnzuKmotuihyFo=
github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
github.com/vbauerster/mpb/v8 v8.7.2 h1:SMJtxhNho1MV3OuFgS1DAzhANN1Ejc5Ct+0iSaIkB14=
github.com/vbauerster/mpb/v8 v8.7.2/go.mod h1:ZFnrjzspgDHoxYLGvxIruiNk73GNTPG4YHgVNpR10VY=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs=
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4=
github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM=
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -399,19 +401,24 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t
go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg=
go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8=
go.mongodb.org/mongo-driver v1.11.3 h1:Ql6K6qYHEzB6xvu4+AU0BoRoqf9vFPcc4o7MUIdPW8Y=
go.mongodb.org/mongo-driver v1.11.3/go.mod h1:PTSz5yu21bkT/wXpkS7WR5f0ddqw5quethTUn9WM+2g=
go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk=
go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo=
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak=
go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q=
go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs=
go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE=
go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8=
go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg=
go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -422,8 +429,8 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY=
golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc=
golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20231226003508-02704c960a9b h1:kLiC65FbiHWFAOu+lxwNPujcsl8VYyTYYEZnsOO1WK4=
golang.org/x/exp v0.0.0-20231226003508-02704c960a9b/go.mod h1:iRJReGqOEeBhDZGkGbynYwcHlctCvnjTYIamk7uXpHI=
@@ -448,11 +455,11 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo=
golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.14.0 h1:P0Vrf/2538nmC0H+pEQ3MNFRRnVR7RlqyVw+bvm26z0=
golang.org/x/oauth2 v0.14.0/go.mod h1:lAtNWgaWfL4cm7j2OV8TxGi9Qb7ECORx8DktCY74OwM=
golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ=
golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -462,8 +469,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -492,6 +499,7 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
@@ -521,6 +529,8 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb h1:XFBgcDwm7irdHTbz4Zk2h7Mh+eis4nfJEFQFYzJzuIA=
google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb h1:lK0oleSc7IQsUxO3U5TjL9DWlsxpEBemh+zpB7IqhWI=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -528,8 +538,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -543,7 +553,6 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alexcesaro/statsd.v2 v2.0.0 h1:FXkZSCZIH17vLCO5sO2UucTHsH9pc+17F6pl3JVCwMc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -563,6 +572,6 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
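Everything from here down is the mechanical fallout of `go mod vendor`: files under vendor/ are verbatim copies of the upstream modules at their new pinned versions, not hand edits. One way to confirm the vendor tree stays in sync, assuming a full checkout of this commit:

```sh
# rebuild vendor/ from go.mod and fail if the result differs from the commit
go mod vendor
git diff --exit-code -- vendor/
```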
vendor/github.com/Microsoft/hcsshim/.golangci.yml (7 changed lines; generated, vendored)
@@ -20,6 +20,7 @@ linters:
    # - typecheck
    # - unused

    - errorlint # error wrapping (eg, not using `errors.Is`, using `%s` instead of `%w` in `fmt.Errorf`)
    - gofmt # whether code was gofmt-ed
    - govet # enabled by default, but just to be sure
    - nolintlint # ill-formed or insufficient nolint directives
@@ -53,6 +54,12 @@ issues:
      text: "^ST1003: should not use underscores in package names$"
      source: "^package cri_containerd$"

    # don't bother with propper error wrapping in test code
    - path: cri-containerd
      linters:
        - errorlint
      text: "non-wrapping format verb for fmt.Errorf"

    # This repo has a LOT of generated schema files, operating system bindings, and other
    # things that ST1003 from stylecheck won't like (screaming case Windows api constants for example).
    # There's also some structs that we *could* change the initialisms to be Go friendly
vendor/github.com/Microsoft/hcsshim/README.md (84 changed lines; generated, vendored)
@@ -9,15 +9,18 @@ It is primarily used in the [Moby](https://github.com/moby/moby) and [Containerd
## Building

While this repository can be used as a library of sorts to call the HCS apis, there are a couple binaries built out of the repository as well. The main ones being the Linux guest agent, and an implementation of the [runtime v2 containerd shim api](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md).

### Linux Hyper-V Container Guest Agent

To build the Linux guest agent itself all that's needed is to set your GOOS to "Linux" and build out of ./cmd/gcs.

```powershell
C:\> $env:GOOS="linux"
C:\> go build .\cmd\gcs\
```

or on a Linux machine

```sh
> go build ./cmd/gcs
```
@@ -33,13 +36,15 @@ make all
```

If the build is successful, in the `./out` folder you should see:

```sh
> ls ./out/
delta.tar.gz initrd.img rootfs.tar.gz
```

### Containerd Shim
For info on the Runtime V2 API: https://github.com/containerd/containerd/blob/master/runtime/v2/README.md.

For info on the [Runtime V2 API](https://github.com/containerd/containerd/blob/master/runtime/v2/README.md).

Contrary to the typical Linux architecture of shim -> runc, the runhcs shim is used both to launch and manage the lifetime of containers.

@@ -48,7 +53,9 @@ C:\> $env:GOOS="windows"
C:\> go build .\cmd\containerd-shim-runhcs-v1
```

Then place the binary in the same directory that Containerd is located at in your environment. A default Containerd configuration file can be generated by running:
Then place the binary in the same directory that Containerd is located at in your environment.
A default Containerd configuration file can be generated by running:

```powershell
.\containerd.exe config default | Out-File "C:\Program Files\containerd\config.toml" -Encoding ascii
```
@@ -56,6 +63,7 @@ Then place the binary in the same directory that Containerd is located at in you
This config file will already have the shim set as the default runtime for cri interactions.

To trial using the shim out with ctr.exe:

```powershell
C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/windows/nanoserver:2004 windows-test cmd /c "echo Hello World!"
```
@@ -64,16 +72,69 @@ C:\> ctr.exe run --runtime io.containerd.runhcs.v1 --rm mcr.microsoft.com/window

This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.microsoft.com.
the rights to use your contribution. For details, visit [Microsoft CLA](https://cla.microsoft.com).

When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.

We also require that contributors [sign their commits](https://git-scm.com/docs/git-commit) using `git commit -s` or `git commit --signoff` to
certify they either authored the work themselves or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for
more info, as well as to make sure that you can attest to the rules listed. Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure
that all commits in a given PR are signed-off.
We require that contributors sign their commits
to certify they either authored the work themselves or otherwise have permission to use it in this project.

We also require that contributors sign their commits using using [`git commit --signoff`][git-commit-s]
to certify they either authored the work themselves or otherwise have permission to use it in this project.
A range of commits can be signed off using [`git rebase --signoff`][git-rebase-s].

Please see [the developer certificate](https://developercertificate.org) for more info,
as well as to make sure that you can attest to the rules listed.
Our CI uses the [DCO Github app](https://github.com/apps/dco) to ensure that all commits in a given PR are signed-off.

### Linting

Code must pass a linting stage, which uses [`golangci-lint`][lint].
Since `./test` is a separate Go module, the linter is run from both the root and the
`test` directories. Additionally, the linter is run with `GOOS` set to both `windows` and
`linux`.

The linting settings are stored in [`.golangci.yaml`](./.golangci.yaml), and can be run
automatically with VSCode by adding the following to your workspace or folder settings:

```json
"go.lintTool": "golangci-lint",
"go.lintOnSave": "package",
```

Additional editor [integrations options are also available][lint-ide].

Alternatively, `golangci-lint` can be [installed][lint-install] and run locally:

```shell
# use . or specify a path to only lint a package
# to show all lint errors, use flags "--max-issues-per-linter=0 --max-same-issues=0"
> golangci-lint run
```

To run across the entire repo for both `GOOS=windows` and `linux`:

```powershell
> foreach ( $goos in ('windows', 'linux') ) {
    foreach ( $repo in ('.', 'test') ) {
        pwsh -Command "cd $repo && go env -w GOOS=$goos && golangci-lint.exe run --verbose"
    }
}
```

### Go Generate

The pipeline checks that auto-generated code, via `go generate`, are up to date.
Similar to the [linting stage](#linting), `go generate` is run in both the root and test Go modules.

This can be done via:

```shell
> go generate ./...
> cd test && go generate ./...
```

## Code of Conduct

@@ -83,7 +144,7 @@ contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additio

## Dependencies

This project requires Golang 1.17 or newer to build.
This project requires Golang 1.18 or newer to build.

For system requirements to run this project, see the Microsoft docs on [Windows Container requirements](https://docs.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/system-requirements).

@@ -100,3 +161,10 @@ For additional details, see [Report a Computer Security Vulnerability](https://t

---------------
Copyright (c) 2018 Microsoft Corp. All rights reserved.

[lint]: https://golangci-lint.run/
[lint-ide]: https://golangci-lint.run/usage/integrations/#editor-integration
[lint-install]: https://golangci-lint.run/usage/install/#local-installation

[git-commit-s]: https://git-scm.com/docs/git-commit#Documentation/git-commit.txt--s
[git-rebase-s]: https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---signoff
vendor/github.com/Microsoft/hcsshim/computestorage/attach.go (generated, vendored; 28 changed lines)

@@ -38,3 +38,31 @@ func AttachLayerStorageFilter(ctx context.Context, layerPath string, layerData L
    }
    return nil
}

// AttachOverlayFilter sets up a filter of the given type on a writable container layer. Currently the only
// supported filter types are WCIFS & UnionFS (defined in internal/hcs/schema2/layer.go)
//
// `volumePath` is the volume path at which the writable layer is mounted. If the
// path does not end in a `\` the platform will append it automatically.
//
// `layerData` is the parent read-only layer data.
func AttachOverlayFilter(ctx context.Context, volumePath string, layerData LayerData) (err error) {
    title := "hcsshim::AttachOverlayFilter"
    ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
    defer span.End()
    defer func() { oc.SetSpanStatus(span, err) }()
    span.AddAttributes(
        trace.StringAttribute("volumePath", volumePath),
    )

    bytes, err := json.Marshal(layerData)
    if err != nil {
        return err
    }

    err = hcsAttachOverlayFilter(volumePath, string(bytes))
    if err != nil {
        return errors.Wrap(err, "failed to attach overlay filter")
    }
    return nil
}
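To make the new API concrete, here is a minimal sketch of how a caller might drive the attach/detach pair added above. The volume path is a made-up placeholder, and note that `hcsschema` lives under `internal/`, so the filter-type constants are only reachable from inside the hcsshim module — this is an illustration, not a drop-in consumer:

```go
//go:build windows

package example

import (
    "context"

    "github.com/Microsoft/hcsshim/computestorage"
    hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
)

// overlayLayer attaches a UnionFS filter to a writable layer volume, runs fn,
// and detaches the filter again. volumePath is assumed to already host a
// mounted writable layer (the platform appends a trailing `\` if missing).
func overlayLayer(ctx context.Context, volumePath string, fn func() error) error {
    layerData := computestorage.LayerData{
        FilterType: hcsschema.UnionFS, // or hcsschema.WCIFS
        // SchemaVersion and parent Layers omitted for brevity.
    }
    if err := computestorage.AttachOverlayFilter(ctx, volumePath, layerData); err != nil {
        return err
    }
    defer computestorage.DetachOverlayFilter(ctx, volumePath, hcsschema.UnionFS) //nolint:errcheck
    return fn()
}
```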
vendor/github.com/Microsoft/hcsshim/computestorage/detach.go (generated, vendored; 26 changed lines)

@@ -4,7 +4,9 @@ package computestorage

import (
    "context"
    "encoding/json"

    hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2"
    "github.com/Microsoft/hcsshim/internal/oc"
    "github.com/pkg/errors"
    "go.opencensus.io/trace"
@@ -26,3 +28,27 @@ func DetachLayerStorageFilter(ctx context.Context, layerPath string) (err error)
    }
    return nil
}

// DetachOverlayFilter detaches the filter on a writable container layer.
//
// `volumePath` is a path to the writable container volume.
func DetachOverlayFilter(ctx context.Context, volumePath string, filterType hcsschema.FileSystemFilterType) (err error) {
    title := "hcsshim::DetachOverlayFilter"
    ctx, span := oc.StartSpan(ctx, title) //nolint:ineffassign,staticcheck
    defer span.End()
    defer func() { oc.SetSpanStatus(span, err) }()
    span.AddAttributes(trace.StringAttribute("volumePath", volumePath))

    layerData := LayerData{}
    layerData.FilterType = filterType
    bytes, err := json.Marshal(layerData)
    if err != nil {
        return err
    }

    err = hcsDetachOverlayFilter(volumePath, string(bytes))
    if err != nil {
        return errors.Wrap(err, "failed to detach overlay filter")
    }
    return nil
}
vendor/github.com/Microsoft/hcsshim/computestorage/storage.go (generated, vendored; 7 changed lines)

@@ -19,14 +19,17 @@ import (
//sys hcsFormatWritableLayerVhd(handle windows.Handle) (hr error) = computestorage.HcsFormatWritableLayerVhd?
//sys hcsGetLayerVhdMountPath(vhdHandle windows.Handle, mountPath **uint16) (hr error) = computestorage.HcsGetLayerVhdMountPath?
//sys hcsSetupBaseOSVolume(layerPath string, volumePath string, options string) (hr error) = computestorage.HcsSetupBaseOSVolume?
//sys hcsAttachOverlayFilter(volumePath string, layerData string) (hr error) = computestorage.HcsAttachOverlayFilter?
//sys hcsDetachOverlayFilter(volumePath string, layerData string) (hr error) = computestorage.HcsDetachOverlayFilter?

type Version = hcsschema.Version
type Layer = hcsschema.Layer

// LayerData is the data used to describe parent layer information.
type LayerData struct {
    SchemaVersion Version `json:"SchemaVersion,omitempty"`
    Layers        []Layer `json:"Layers,omitempty"`
    SchemaVersion Version                        `json:"SchemaVersion,omitempty"`
    Layers        []Layer                        `json:"Layers,omitempty"`
    FilterType    hcsschema.FileSystemFilterType `json:"FilterType,omitempty"`
}

// ExportLayerOptions are the set of options that are used with the `computestorage.HcsExportLayer` syscall.
vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go (generated, vendored; 60 changed lines)

@@ -43,8 +43,10 @@ var (
    modcomputestorage = windows.NewLazySystemDLL("computestorage.dll")

    procHcsAttachLayerStorageFilter = modcomputestorage.NewProc("HcsAttachLayerStorageFilter")
    procHcsAttachOverlayFilter      = modcomputestorage.NewProc("HcsAttachOverlayFilter")
    procHcsDestroyLayer             = modcomputestorage.NewProc("HcsDestroyLayer")
    procHcsDetachLayerStorageFilter = modcomputestorage.NewProc("HcsDetachLayerStorageFilter")
    procHcsDetachOverlayFilter      = modcomputestorage.NewProc("HcsDetachOverlayFilter")
    procHcsExportLayer              = modcomputestorage.NewProc("HcsExportLayer")
    procHcsFormatWritableLayerVhd   = modcomputestorage.NewProc("HcsFormatWritableLayerVhd")
    procHcsGetLayerVhdMountPath     = modcomputestorage.NewProc("HcsGetLayerVhdMountPath")
@@ -83,6 +85,35 @@ func _hcsAttachLayerStorageFilter(layerPath *uint16, layerData *uint16) (hr erro
    return
}

func hcsAttachOverlayFilter(volumePath string, layerData string) (hr error) {
    var _p0 *uint16
    _p0, hr = syscall.UTF16PtrFromString(volumePath)
    if hr != nil {
        return
    }
    var _p1 *uint16
    _p1, hr = syscall.UTF16PtrFromString(layerData)
    if hr != nil {
        return
    }
    return _hcsAttachOverlayFilter(_p0, _p1)
}

func _hcsAttachOverlayFilter(volumePath *uint16, layerData *uint16) (hr error) {
    hr = procHcsAttachOverlayFilter.Find()
    if hr != nil {
        return
    }
    r0, _, _ := syscall.Syscall(procHcsAttachOverlayFilter.Addr(), 2, uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)), 0)
    if int32(r0) < 0 {
        if r0&0x1fff0000 == 0x00070000 {
            r0 &= 0xffff
        }
        hr = syscall.Errno(r0)
    }
    return
}

func hcsDestroyLayer(layerPath string) (hr error) {
    var _p0 *uint16
    _p0, hr = syscall.UTF16PtrFromString(layerPath)
@@ -131,6 +162,35 @@ func _hcsDetachLayerStorageFilter(layerPath *uint16) (hr error) {
    return
}

func hcsDetachOverlayFilter(volumePath string, layerData string) (hr error) {
    var _p0 *uint16
    _p0, hr = syscall.UTF16PtrFromString(volumePath)
    if hr != nil {
        return
    }
    var _p1 *uint16
    _p1, hr = syscall.UTF16PtrFromString(layerData)
    if hr != nil {
        return
    }
    return _hcsDetachOverlayFilter(_p0, _p1)
}

func _hcsDetachOverlayFilter(volumePath *uint16, layerData *uint16) (hr error) {
    hr = procHcsDetachOverlayFilter.Find()
    if hr != nil {
        return
    }
    r0, _, _ := syscall.Syscall(procHcsDetachOverlayFilter.Addr(), 2, uintptr(unsafe.Pointer(volumePath)), uintptr(unsafe.Pointer(layerData)), 0)
    if int32(r0) < 0 {
        if r0&0x1fff0000 == 0x00070000 {
            r0 &= 0xffff
        }
        hr = syscall.Errno(r0)
    }
    return
}

func hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) {
    var _p0 *uint16
    _p0, hr = syscall.UTF16PtrFromString(layerPath)
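The `r0&0x1fff0000 == 0x00070000` dance in these generated wrappers undoes `HRESULT_FROM_WIN32`: a failing HRESULT whose facility bits equal `FACILITY_WIN32` (7) carries the original Win32 error code in its low 16 bits, which is what `syscall.Errno` expects. A standalone sketch of the conversion (the function name is ours, not hcsshim's):

```go
package main

import (
    "fmt"
    "syscall"
)

// errnoFromHRESULT mirrors the generated wrappers: negative HRESULTs are
// failures; when the facility field is FACILITY_WIN32 (0x0007, matched here
// via the 0x1fff0000 mask), the low 16 bits are the Win32 error code.
func errnoFromHRESULT(r0 uintptr) error {
    if int32(r0) >= 0 {
        return nil // S_OK or another success code
    }
    if r0&0x1fff0000 == 0x00070000 {
        r0 &= 0xffff // recover the original Win32 error code
    }
    return syscall.Errno(r0)
}

func main() {
    // 0x80070005 == HRESULT_FROM_WIN32(ERROR_ACCESS_DENIED).
    fmt.Println(errnoFromHRESULT(0x80070005)) // the errno for code 0x5
}
```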
vendor/github.com/Microsoft/hcsshim/container.go (generated, vendored; 2 changed lines)

@@ -75,7 +75,7 @@ func init() {
func CreateContainer(id string, c *ContainerConfig) (Container, error) {
    fullConfig, err := mergemaps.MergeJSON(c, createContainerAdditionalJSON)
    if err != nil {
        return nil, fmt.Errorf("failed to merge additional JSON '%s': %s", createContainerAdditionalJSON, err)
        return nil, fmt.Errorf("failed to merge additional JSON '%s': %w", createContainerAdditionalJSON, err)
    }

    system, err := hcs.CreateComputeSystem(context.Background(), id, fullConfig)
vendor/github.com/Microsoft/hcsshim/errors.go (generated, vendored; 11 changed lines)

@@ -115,6 +115,7 @@ func (e *ContainerError) Error() string {
        s += " encountered an error during " + e.Operation
    }

    //nolint:errorlint // legacy code
    switch e.Err.(type) {
    case nil:
        break
@@ -145,6 +146,7 @@ func (e *ProcessError) Error() string {
        s += " encountered an error during " + e.Operation
    }

    //nolint:errorlint // legacy code
    switch e.Err.(type) {
    case nil:
        break
@@ -166,10 +168,10 @@ func (e *ProcessError) Error() string {
// already exited, or does not exist. Both IsAlreadyStopped and IsNotExist
// will currently return true when the error is ErrElementNotFound.
func IsNotExist(err error) bool {
    if _, ok := err.(EndpointNotFoundError); ok {
    if _, ok := err.(EndpointNotFoundError); ok { //nolint:errorlint // legacy code
        return true
    }
    if _, ok := err.(NetworkNotFoundError); ok {
    if _, ok := err.(NetworkNotFoundError); ok { //nolint:errorlint // legacy code
        return true
    }
    return hcs.IsNotExist(getInnerError(err))
@@ -224,6 +226,7 @@ func IsAccessIsDenied(err error) bool {
}

func getInnerError(err error) error {
    //nolint:errorlint // legacy code
    switch pe := err.(type) {
    case nil:
        return nil
@@ -236,14 +239,14 @@ func getInnerError(err error) error {
}

func convertSystemError(err error, c *container) error {
    if serr, ok := err.(*hcs.SystemError); ok {
    if serr, ok := err.(*hcs.SystemError); ok { //nolint:errorlint // legacy code
        return &ContainerError{Container: c, Operation: serr.Op, Err: serr.Err, Events: serr.Events}
    }
    return err
}

func convertProcessError(err error, p *process) error {
    if perr, ok := err.(*hcs.ProcessError); ok {
    if perr, ok := err.(*hcs.ProcessError); ok { //nolint:errorlint // legacy code
        return &ProcessError{Process: p, Operation: perr.Op, Err: perr.Err, Events: perr.Events}
    }
    return err
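Most hunks in this file and the ones that follow either switch sentinel comparisons to `errors.Is` / `fmt.Errorf(... %w ...)`, or add `//nolint:errorlint` where the legacy `==` comparison is intentional. The distinction in one self-contained sketch:

```go
package main

import (
    "errors"
    "fmt"
)

var ErrTimeout = errors.New("timeout")

func main() {
    // %w wraps the sentinel, so it stays discoverable in the error chain.
    wrapped := fmt.Errorf("create compute system: %w", ErrTimeout)

    fmt.Println(wrapped == ErrTimeout)          // false: == only matches the exact value
    fmt.Println(errors.Is(wrapped, ErrTimeout)) // true: errors.Is unwraps the chain
}
```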
vendor/github.com/Microsoft/hcsshim/internal/hcs/process.go (generated, vendored; 2 changed lines)

@@ -63,7 +63,7 @@ func (process *Process) SystemID() string {
}

func (process *Process) processSignalResult(ctx context.Context, err error) (bool, error) {
    switch err {
    switch err { //nolint:errorlint
    case nil:
        return true, nil
    case ErrVmcomputeOperationInvalidState, ErrComputeSystemDoesNotExist, ErrElementNotFound:
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/layer.go (generated, vendored; 7 changed lines)

@@ -9,6 +9,13 @@

package hcsschema

type FileSystemFilterType string

const (
    UnionFS FileSystemFilterType = "UnionFS"
    WCIFS   FileSystemFilterType = "WCIFS"
)

type Layer struct {
    Id string `json:"Id,omitempty"`
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_hive.go (generated, vendored; new file, 13 lines)

@@ -0,0 +1,13 @@
package hcsschema

// NOTE: manually added

type RegistryHive string

// List of RegistryHive
const (
    RegistryHive_SYSTEM   RegistryHive = "System"
    RegistryHive_SOFTWARE RegistryHive = "Software"
    RegistryHive_SECURITY RegistryHive = "Security"
    RegistryHive_SAM      RegistryHive = "Sam"
)
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_key.go (generated, vendored; 2 changed lines)

@@ -10,7 +10,7 @@
package hcsschema

type RegistryKey struct {
    Hive string       `json:"Hive,omitempty"`
    Hive RegistryHive `json:"Hive,omitempty"`

    Name string `json:"Name,omitempty"`
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value.go (generated, vendored; 2 changed lines)

@@ -14,7 +14,7 @@ type RegistryValue struct {

    Name string `json:"Name,omitempty"`

    Type_ string            `json:"Type,omitempty"`
    Type_ RegistryValueType `json:"Type,omitempty"`

    // One and only one value type must be set.
    StringValue string `json:"StringValue,omitempty"`
vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/registry_value_type.go (generated, vendored; new file, 17 lines)

@@ -0,0 +1,17 @@
package hcsschema

// NOTE: manually added

type RegistryValueType string

// List of RegistryValueType
const (
    RegistryValueType_NONE            RegistryValueType = "None"
    RegistryValueType_STRING          RegistryValueType = "String"
    RegistryValueType_EXPANDED_STRING RegistryValueType = "ExpandedString"
    RegistryValueType_MULTI_STRING    RegistryValueType = "MultiString"
    RegistryValueType_BINARY          RegistryValueType = "Binary"
    RegistryValueType_D_WORD          RegistryValueType = "DWord"
    RegistryValueType_Q_WORD          RegistryValueType = "QWord"
    RegistryValueType_CUSTOM_TYPE     RegistryValueType = "CustomType"
)
vendor/github.com/Microsoft/hcsshim/internal/hcs/system.go (generated, vendored; 8 changed lines)

@@ -97,7 +97,7 @@ func CreateComputeSystem(ctx context.Context, id string, hcsDocumentInterface in
    events, err := processAsyncHcsResult(ctx, createError, resultJSON, computeSystem.callbackNumber,
        hcsNotificationSystemCreateCompleted, &timeout.SystemCreate)
    if err != nil {
        if err == ErrTimeout {
        if errors.Is(err, ErrTimeout) {
            // Terminate the compute system if it still exists. We're okay to
            // ignore a failure here.
            _ = computeSystem.Terminate(ctx)
@@ -238,7 +238,7 @@ func (computeSystem *System) Shutdown(ctx context.Context) error {

    resultJSON, err := vmcompute.HcsShutdownComputeSystem(ctx, computeSystem.handle, "")
    events := processHcsResult(ctx, resultJSON)
    switch err {
    switch err { //nolint:errorlint
    case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
    default:
        return makeSystemError(computeSystem, operation, err, events)
@@ -259,7 +259,7 @@ func (computeSystem *System) Terminate(ctx context.Context) error {

    resultJSON, err := vmcompute.HcsTerminateComputeSystem(ctx, computeSystem.handle, "")
    events := processHcsResult(ctx, resultJSON)
    switch err {
    switch err { //nolint:errorlint
    case nil, ErrVmcomputeAlreadyStopped, ErrComputeSystemDoesNotExist, ErrVmcomputeOperationPending:
    default:
        return makeSystemError(computeSystem, operation, err, events)
@@ -279,7 +279,7 @@ func (computeSystem *System) waitBackground() {
    span.AddAttributes(trace.StringAttribute("cid", computeSystem.id))

    err := waitForNotification(ctx, computeSystem.callbackNumber, hcsNotificationSystemExited, nil)
    switch err {
    switch err { //nolint:errorlint
    case nil:
        log.G(ctx).Debug("system exited")
    case ErrVmcomputeUnexpectedExit:
vendor/github.com/Microsoft/hcsshim/internal/hns/hnsfuncs.go (generated, vendored; 2 changed lines)

@@ -31,7 +31,7 @@ func hnsCallRawResponse(method, path, request string) (*hnsResponse, error) {
func hnsCall(method, path, request string, returnResponse interface{}) error {
    hnsresponse, err := hnsCallRawResponse(method, path, request)
    if err != nil {
        return fmt.Errorf("failed during hnsCallRawResponse: %v", err)
        return fmt.Errorf("failed during hnsCallRawResponse: %w", err)
    }
    if !hnsresponse.Success {
        return fmt.Errorf("hns failed with error : %s", hnsresponse.Error)
vendor/github.com/Microsoft/hcsshim/internal/hns/namespace.go (generated, vendored; 4 changed lines)

@@ -56,7 +56,7 @@ func issueNamespaceRequest(id *string, method, subpath string, request interface
        if strings.Contains(err.Error(), "Element not found.") {
            return nil, os.ErrNotExist
        }
        return nil, fmt.Errorf("%s %s: %s", method, hnspath, err)
        return nil, fmt.Errorf("%s %s: %w", method, hnspath, err)
    }
    return &ns, err
}
@@ -86,7 +86,7 @@ func GetNamespaceEndpoints(id string) ([]string, error) {
        var endpoint namespaceEndpointRequest
        err = json.Unmarshal(rsrc.Data, &endpoint)
        if err != nil {
            return nil, fmt.Errorf("unmarshal endpoint: %s", err)
            return nil, fmt.Errorf("unmarshal endpoint: %w", err)
        }
        endpoints = append(endpoints, endpoint.ID)
    }
vendor/github.com/Microsoft/hcsshim/internal/jobobject/iocp.go (generated, vendored; 3 changed lines)

@@ -4,6 +4,7 @@ package jobobject

import (
    "context"
    "errors"
    "fmt"
    "sync"
    "unsafe"
@@ -59,7 +60,7 @@ func pollIOCP(ctx context.Context, iocpHandle windows.Handle) {
            }).Warn("failed to parse job object message")
            continue
        }
        if err := msq.Enqueue(notification); err == queue.ErrQueueClosed {
        if err := msq.Enqueue(notification); errors.Is(err, queue.ErrQueueClosed) {
            // Write will only return an error when the queue is closed.
            // The only time a queue would ever be closed is when we call `Close` on
            // the job it belongs to which also removes it from the jobMap, so something
vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go (generated, vendored; 2 changed lines)

@@ -374,7 +374,7 @@ func (job *JobObject) Pids() ([]uint32, error) {
        return []uint32{}, nil
    }

    if err != winapi.ERROR_MORE_DATA {
    if err != winapi.ERROR_MORE_DATA { //nolint:errorlint
        return nil, fmt.Errorf("failed initial query for PIDs in job object: %w", err)
    }
vendor/github.com/Microsoft/hcsshim/internal/jobobject/limits.go (generated, vendored; 7 changed lines)

@@ -143,6 +143,13 @@ func (job *JobObject) SetCPUAffinity(affinityBitMask uint64) error {
        return err
    }
    info.BasicLimitInformation.LimitFlags |= uint32(windows.JOB_OBJECT_LIMIT_AFFINITY)

    // We really, really shouldn't be running on 32 bit, but just in case (and to satisfy CodeQL) ...
    const maxUintptr = ^uintptr(0)
    if affinityBitMask > uint64(maxUintptr) {
        return fmt.Errorf("affinity bitmask (%d) exceeds max allowable value (%d)", affinityBitMask, maxUintptr)
    }

    info.BasicLimitInformation.Affinity = uintptr(affinityBitMask)
    return job.setExtendedInformation(info)
}
vendor/github.com/Microsoft/hcsshim/internal/log/format.go (generated, vendored; 1 changed line)

@@ -104,6 +104,7 @@ func encode(v interface{}) (_ []byte, err error) {
    if jErr := enc.Encode(v); jErr != nil {
        if err != nil {
            // TODO (go1.20): use multierror via fmt.Errorf("...: %w; ...: %w", ...)
            //nolint:errorlint // non-wrapping format verb for fmt.Errorf
            return nil, fmt.Errorf("protojson encoding: %v; json encoding: %w", err, jErr)
        }
        return nil, fmt.Errorf("json encoding: %w", jErr)
vendor/github.com/Microsoft/hcsshim/internal/logfields/fields.go (generated, vendored; 1 changed line)

@@ -46,6 +46,7 @@ const (

    ExpectedType = "expected-type"

    Bool   = "bool"
    Int32  = "int32"
    Uint32 = "uint32"
    Uint64 = "uint64"
vendor/github.com/Microsoft/hcsshim/internal/memory/pool.go (generated, vendored; 6 changed lines)

@@ -126,7 +126,7 @@ func (pa *PoolAllocator) Allocate(size uint64) (MappedRegion, error) {
    // this means that there are no more regions for the current class, try expanding
    if nextCls != memCls {
        if err := pa.split(memCls); err != nil {
            if err == ErrInvalidMemoryClass {
            if errors.Is(err, ErrInvalidMemoryClass) {
                return nil, ErrNotEnoughSpace
            }
            return nil, err
@@ -147,7 +147,7 @@ func (pa *PoolAllocator) Allocate(size uint64) (MappedRegion, error) {
}

// Release marks a memory region of class `memCls` and offset `offset` as free and tries to merge smaller regions into
// a bigger one
// a bigger one.
func (pa *PoolAllocator) Release(reg MappedRegion) error {
    mp := pa.pools[reg.Type()]
    if mp == nil {
@@ -164,7 +164,7 @@ func (pa *PoolAllocator) Release(reg MappedRegion) error {
        return ErrNotAllocated
    }
    if err := pa.merge(n.parent); err != nil {
        if err != ErrEarlyMerge {
        if !errors.Is(err, ErrEarlyMerge) {
            return err
        }
    }
vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go (generated, vendored; 2 changed lines)

@@ -243,7 +243,7 @@ func RemoveRelative(path string, root *os.File) error {
    if err == nil {
        defer f.Close()
        err = deleteOnClose(f)
        if err == syscall.ERROR_ACCESS_DENIED {
        if err == syscall.ERROR_ACCESS_DENIED { //nolint:errorlint
            // Maybe the file is marked readonly. Clear the bit and retry.
            _ = clearReadOnly(f)
            err = deleteOnClose(f)
vendor/github.com/Microsoft/hcsshim/internal/vmcompute/vmcompute.go (generated, vendored; 16 changed lines)

@@ -104,7 +104,7 @@ func execute(ctx gcontext.Context, timeout time.Duration, f func() error) error
    }()
    select {
    case <-ctx.Done():
        if ctx.Err() == gcontext.DeadlineExceeded {
        if ctx.Err() == gcontext.DeadlineExceeded { //nolint:errorlint
            log.G(ctx).WithField(logfields.Timeout, trueTimeout).
                Warning("Syscall did not complete within operation timeout. This may indicate a platform issue. " +
                    "If it appears to be making no forward progress, obtain the stacks and see if there is a syscall " +
@@ -150,7 +150,7 @@ func HcsCreateComputeSystem(ctx gcontext.Context, id string, configuration strin
        if result != "" {
            span.AddAttributes(trace.StringAttribute("result", result))
        }
        if hr != errVmcomputeOperationPending {
        if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned
            oc.SetSpanStatus(span, hr)
        }
    }()
@@ -205,7 +205,7 @@ func HcsStartComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, option
        if result != "" {
            span.AddAttributes(trace.StringAttribute("result", result))
        }
        if hr != errVmcomputeOperationPending {
        if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned
            oc.SetSpanStatus(span, hr)
        }
    }()
@@ -228,7 +228,7 @@ func HcsShutdownComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, opt
        if result != "" {
            span.AddAttributes(trace.StringAttribute("result", result))
        }
        if hr != errVmcomputeOperationPending {
        if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned
            oc.SetSpanStatus(span, hr)
        }
    }()
@@ -251,7 +251,7 @@ func HcsTerminateComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, op
        if result != "" {
            span.AddAttributes(trace.StringAttribute("result", result))
        }
        if hr != errVmcomputeOperationPending {
        if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned
            oc.SetSpanStatus(span, hr)
        }
    }()
@@ -274,7 +274,7 @@ func HcsPauseComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, option
        if result != "" {
            span.AddAttributes(trace.StringAttribute("result", result))
        }
        if hr != errVmcomputeOperationPending {
        if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned
            oc.SetSpanStatus(span, hr)
        }
    }()
@@ -297,7 +297,7 @@ func HcsResumeComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, optio
        if result != "" {
            span.AddAttributes(trace.StringAttribute("result", result))
        }
        if hr != errVmcomputeOperationPending {
        if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned
            oc.SetSpanStatus(span, hr)
        }
    }()
@@ -621,7 +621,7 @@ func HcsSaveComputeSystem(ctx gcontext.Context, computeSystem HcsSystem, options
        if result != "" {
            span.AddAttributes(trace.StringAttribute("result", result))
        }
        if hr != errVmcomputeOperationPending {
        if hr != errVmcomputeOperationPending { //nolint:errorlint // explicitly returned
            oc.SetSpanStatus(span, hr)
        }
    }()
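The `execute` helper these hunks touch races a syscall against a context deadline; the `//nolint:errorlint` is needed because `ctx.Err()` returns the exact sentinel value. The pattern, reduced to a runnable sketch (the names are ours, not vmcompute's):

```go
package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

// runWithTimeout runs f on its own goroutine and races its completion against
// the context deadline, the same shape as vmcompute's execute helper.
func runWithTimeout(ctx context.Context, f func() error) error {
    done := make(chan error, 1) // buffered so the goroutine never blocks forever
    go func() { done <- f() }()
    select {
    case <-ctx.Done():
        if errors.Is(ctx.Err(), context.DeadlineExceeded) {
            return fmt.Errorf("syscall did not complete within operation timeout: %w", ctx.Err())
        }
        return ctx.Err()
    case err := <-done:
        return err
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
    defer cancel()
    err := runWithTimeout(ctx, func() error { time.Sleep(time.Second); return nil })
    fmt.Println(err)
}
```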
vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go (generated, vendored; 6 changed lines)

@@ -1,3 +1,5 @@
//go:build windows

package wclayer

import (
@@ -64,7 +66,7 @@ func (r *baseLayerReader) walkUntilCancelled() error {
        return nil
    })

    if err == errorIterationCanceled {
    if err == errorIterationCanceled { //nolint:errorlint // explicitly returned
        return nil
    }

@@ -103,7 +105,7 @@ func (r *baseLayerReader) walkUntilCancelled() error {
        return nil
    })

    if err == errorIterationCanceled {
    if err == errorIterationCanceled { //nolint:errorlint // explicitly returned
        return nil
    }
vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go (generated, vendored; 2 changed lines)

@@ -1,3 +1,5 @@
//go:build windows

package wclayer

import (
vendor/github.com/Microsoft/hcsshim/internal/wclayer/expandscratchsize.go (generated, vendored; 18 changed lines)

@@ -11,7 +11,6 @@ import (

    "github.com/Microsoft/hcsshim/internal/hcserror"
    "github.com/Microsoft/hcsshim/internal/oc"
    "github.com/Microsoft/hcsshim/osversion"
    "go.opencensus.io/trace"
)

@@ -30,14 +29,17 @@ func ExpandScratchSize(ctx context.Context, path string, size uint64) (err error
        return hcserror.New(err, title, "")
    }

    // Manually expand the volume now in order to work around bugs in 19H1 and
    // prerelease versions of Vb. Remove once this is fixed in Windows.
    if build := osversion.Build(); build >= osversion.V19H1 && build < 19020 {
        err = expandSandboxVolume(ctx, path)
        if err != nil {
            return err
        }
    // Always expand the volume too. In case of legacy layers not expanding the volume here works because
    // the PrepareLayer call internally handles the expansion. However, in other cases (like CimFS) we
    // don't call PrepareLayer and so the volume will never be expanded. This also means in case of
    // legacy layers, we might have a small perf hit because the VHD is mounted twice for expansion (once
    // here and once during the PrepareLayer call). But as long as the perf hit is minimal, we should be
    // okay.
    err = expandSandboxVolume(ctx, path)
    if err != nil {
        return err
    }

    return nil
}
vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go (generated, vendored; 8 changed lines)

@@ -154,7 +154,7 @@ func (r *legacyLayerReader) walkUntilCancelled() error {
        }
        return nil
    })
    if err == errorIterationCanceled {
    if err == errorIterationCanceled { //nolint:errorlint // explicitly returned
        return nil
    }
    if err == nil {
@@ -196,7 +196,7 @@ func findBackupStreamSize(r io.Reader) (int64, error) {
    for {
        hdr, err := br.Next()
        if err != nil {
            if err == io.EOF {
            if errors.Is(err, io.EOF) {
                err = nil
            }
            return 0, err
@@ -428,7 +428,7 @@ func (w *legacyLayerWriter) initUtilityVM() error {
        // immutable.
        err = cloneTree(w.parentRoots[0], w.destRoot, UtilityVMFilesPath, mutatedUtilityVMFiles)
        if err != nil {
            return fmt.Errorf("cloning the parent utility VM image failed: %s", err)
            return fmt.Errorf("cloning the parent utility VM image failed: %w", err)
        }
        w.HasUtilityVM = true
    }
@@ -451,7 +451,7 @@ func (w *legacyLayerWriter) reset() error {

    for {
        bhdr, err := br.Next()
        if err == io.EOF {
        if errors.Is(err, io.EOF) {
            // end of backupstream data
            break
        }
vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go (generated, vendored; 4 changed lines)

@@ -1,3 +1,5 @@
//go:build windows

package winapi

import (
@@ -34,7 +36,7 @@ type CimFsFileMetadata struct {
//sys CimDismountImage(volumeID *g) (hr error) = cimfs.CimDismountImage?

//sys CimCreateImage(imagePath string, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) = cimfs.CimCreateImage?
//sys CimCloseImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCloseImage?
//sys CimCloseImage(cimFSHandle FsHandle) = cimfs.CimCloseImage?
//sys CimCommitImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCommitImage?

//sys CimCreateFile(cimFSHandle FsHandle, path string, file *CimFsFileMetadata, cimStreamHandle *StreamHandle) (hr error) = cimfs.CimCreateFile?
vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go (generated, vendored; 14 changed lines)

@@ -184,18 +184,12 @@ func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr
    return
}

func CimCloseImage(cimFSHandle FsHandle) (hr error) {
    hr = procCimCloseImage.Find()
    if hr != nil {
func CimCloseImage(cimFSHandle FsHandle) (err error) {
    err = procCimCloseImage.Find()
    if err != nil {
        return
    }
    r0, _, _ := syscall.Syscall(procCimCloseImage.Addr(), 1, uintptr(cimFSHandle), 0, 0)
    if int32(r0) < 0 {
        if r0&0x1fff0000 == 0x00070000 {
            r0 &= 0xffff
        }
        hr = syscall.Errno(r0)
    }
    syscall.Syscall(procCimCloseImage.Addr(), 1, uintptr(cimFSHandle), 0, 0)
    return
}
vendor/github.com/containers/image/v5/copy/manifest.go (generated, vendored; 82 changed lines)

@@ -6,8 +6,10 @@ import (
    "fmt"
    "strings"

    internalManifest "github.com/containers/image/v5/internal/manifest"
    "github.com/containers/image/v5/internal/set"
    "github.com/containers/image/v5/manifest"
    compressiontypes "github.com/containers/image/v5/pkg/compression/types"
    "github.com/containers/image/v5/types"
    v1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/sirupsen/logrus"
@@ -19,8 +21,8 @@ import (
// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used.
var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType}

// ociEncryptionMIMETypes lists manifest MIME types that are known to support OCI encryption.
var ociEncryptionMIMETypes = []string{v1.MediaTypeImageManifest}
// allManifestMIMETypes lists all possible manifest MIME types.
var allManifestMIMETypes = []string{v1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType}

// orderedSet is a list of strings (MIME types or platform descriptors in our case), with each string appearing at most once.
type orderedSet struct {
@@ -51,9 +53,10 @@ type determineManifestConversionInputs struct {

    destSupportedManifestMIMETypes []string // MIME types supported by the destination, per types.ImageDestination.SupportedManifestMIMETypes()

    forceManifestMIMEType      string // User’s choice of forced manifest MIME type
    requiresOCIEncryption      bool   // Restrict to manifest formats that can support OCI encryption
    cannotModifyManifestReason string // The reason the manifest cannot be modified, or an empty string if it can
    forceManifestMIMEType      string                      // User’s choice of forced manifest MIME type
    requestedCompressionFormat *compressiontypes.Algorithm // Compression algorithm to use, if the user _explicitly_ requested one.
    requiresOCIEncryption      bool                        // Restrict to manifest formats that can support OCI encryption
    cannotModifyManifestReason string                      // The reason the manifest cannot be modified, or an empty string if it can
}

// manifestConversionPlan contains the decisions made by determineManifestConversion.
@@ -80,41 +83,74 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
        destSupportedManifestMIMETypes = []string{in.forceManifestMIMEType}
    }

    restrictiveCompressionRequired := in.requestedCompressionFormat != nil && !internalManifest.CompressionAlgorithmIsUniversallySupported(*in.requestedCompressionFormat)
    if len(destSupportedManifestMIMETypes) == 0 {
        if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType) {
        if (!in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(srcType)) &&
            (!restrictiveCompressionRequired || internalManifest.MIMETypeSupportsCompressionAlgorithm(srcType, *in.requestedCompressionFormat)) {
            return manifestConversionPlan{ // Anything goes; just use the original as is, do not try any conversions.
                preferredMIMEType:       srcType,
                otherMIMETypeCandidates: []string{},
            }, nil
        }
        destSupportedManifestMIMETypes = ociEncryptionMIMETypes
        destSupportedManifestMIMETypes = allManifestMIMETypes
    }
    supportedByDest := set.New[string]()
    for _, t := range destSupportedManifestMIMETypes {
        if !in.requiresOCIEncryption || manifest.MIMETypeSupportsEncryption(t) {
            supportedByDest.Add(t)
        if in.requiresOCIEncryption && !manifest.MIMETypeSupportsEncryption(t) {
            continue
        }
        if restrictiveCompressionRequired && !internalManifest.MIMETypeSupportsCompressionAlgorithm(t, *in.requestedCompressionFormat) {
            continue
        }
        supportedByDest.Add(t)
    }
    if supportedByDest.Empty() {
        if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by ociEncryptionMIMETypes
        if len(destSupportedManifestMIMETypes) == 0 { // Coverage: This should never happen, empty values were replaced by allManifestMIMETypes
            return manifestConversionPlan{}, errors.New("internal error: destSupportedManifestMIMETypes is empty")
        }
        // We know, and have verified, that destSupportedManifestMIMETypes is not empty, so encryption must have been involved.
        if !in.requiresOCIEncryption { // Coverage: This should never happen, destSupportedManifestMIMETypes was not empty, so we should have filtered for encryption.
            return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and not encrypting")
        }
        // We know, and have verified, that destSupportedManifestMIMETypes is not empty, so some filtering of supported MIME types must have been involved.

        // destSupportedManifestMIMETypes has three possible origins:
        if in.forceManifestMIMEType != "" { // 1. forceManifestType specified
            return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption",
                in.forceManifestMIMEType)
            switch {
            case in.requiresOCIEncryption && restrictiveCompressionRequired:
                return manifestConversionPlan{}, fmt.Errorf("compression using %s, and encryption, required together with format %s, which does not support both",
                    in.requestedCompressionFormat.Name(), in.forceManifestMIMEType)
            case in.requiresOCIEncryption:
                return manifestConversionPlan{}, fmt.Errorf("encryption required together with format %s, which does not support encryption",
                    in.forceManifestMIMEType)
            case restrictiveCompressionRequired:
                return manifestConversionPlan{}, fmt.Errorf("compression using %s required together with format %s, which does not support it",
                    in.requestedCompressionFormat.Name(), in.forceManifestMIMEType)
            default:
                return manifestConversionPlan{}, errors.New("internal error: forceManifestMIMEType was rejected for an unknown reason")
            }
        }
        if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen ociEncryptionMIMETypes
            // Coverage: This should never happen, ociEncryptionMIMETypes all support encryption
            return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well")
        if len(in.destSupportedManifestMIMETypes) == 0 { // 2. destination accepts anything and we have chosen allManifestTypes
            if !restrictiveCompressionRequired {
                // Coverage: This should never happen.
                // If we have not rejected for compression reasons, we must have rejected due to encryption, but
                // allManifestTypes includes OCI, which supports encryption.
                return manifestConversionPlan{}, errors.New("internal error: in.destSupportedManifestMIMETypes is empty but supportedByDest is empty as well")
            }
            // This can legitimately happen when the user asks for completely unsupported formats like Bzip2 or Xz.
            return manifestConversionPlan{}, fmt.Errorf("compression using %s required, but none of the known manifest formats support it", in.requestedCompressionFormat.Name())
        }
        // 3. destination accepts a restricted list of mime types
        destMIMEList := strings.Join(destSupportedManifestMIMETypes, ", ")
        switch {
        case in.requiresOCIEncryption && restrictiveCompressionRequired:
            return manifestConversionPlan{}, fmt.Errorf("compression using %s, and encryption, required but the destination only supports MIME types [%s], none of which support both",
                in.requestedCompressionFormat.Name(), destMIMEList)
        case in.requiresOCIEncryption:
            return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption",
                destMIMEList)
        case restrictiveCompressionRequired:
            return manifestConversionPlan{}, fmt.Errorf("compression using %s required but the destination only supports MIME types [%s], none of which support it",
                in.requestedCompressionFormat.Name(), destMIMEList)
        default: // Coverage: This should never happen, we only filter for in.requiresOCIEncryption || restrictiveCompressionRequired
            return manifestConversionPlan{}, errors.New("internal error: supportedByDest is empty but destSupportedManifestMIMETypes is not, and we are neither encrypting nor requiring a restrictive compression algorithm")
        }
        // 3. destination does not support encryption.
        return manifestConversionPlan{}, fmt.Errorf("encryption required but the destination only supports MIME types [%s], none of which support encryption",
            strings.Join(destSupportedManifestMIMETypes, ", "))
    }

    // destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types.
@@ -156,7 +192,7 @@ func determineManifestConversion(in determineManifestConversionInputs) (manifest
    }

    logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", "))
    if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes and supportedByDest, which is a subset, is not empty (or we would have exited above), so this should never happen.
        return manifestConversionPlan{}, errors.New("Internal error: no candidate MIME types")
    }
    res := manifestConversionPlan{
vendor/github.com/containers/image/v5/copy/single.go (generated, vendored; 10 changed lines)

@@ -20,6 +20,7 @@ import (
    compressiontypes "github.com/containers/image/v5/pkg/compression/types"
    "github.com/containers/image/v5/transports"
    "github.com/containers/image/v5/types"
    chunkedToc "github.com/containers/storage/pkg/chunked/toc"
    digest "github.com/opencontainers/go-digest"
    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/sirupsen/logrus"
@@ -167,6 +168,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar
        srcMIMEType:                    ic.src.ManifestMIMEType,
        destSupportedManifestMIMETypes: ic.c.dest.SupportedManifestMIMETypes(),
        forceManifestMIMEType:          c.options.ForceManifestMIMEType,
        requestedCompressionFormat:     ic.compressionFormat,
        requiresOCIEncryption:          destRequiresOciEncryption,
        cannotModifyManifestReason:     ic.cannotModifyManifestReason,
    })
@@ -693,6 +695,13 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
        requiredCompression = ic.compressionFormat
        originalCompression = srcInfo.CompressionAlgorithm
    }

    // Check if we have a chunked layer in storage that's based on that blob. These layers are stored by their TOC digest.
    tocDigest, err := chunkedToc.GetTOCDigest(srcInfo.Annotations)
    if err != nil {
        return types.BlobInfo{}, "", err
    }

    reused, reusedBlob, err := ic.c.dest.TryReusingBlobWithOptions(ctx, srcInfo, private.TryReusingBlobOptions{
        Cache:               ic.c.blobInfoCache,
        CanSubstitute:       canSubstitute,
@@ -701,6 +710,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
        SrcRef:              srcRef,
        RequiredCompression: requiredCompression,
        OriginalCompression: originalCompression,
        TOCDigest:           tocDigest,
    })
    if err != nil {
        return types.BlobInfo{}, "", fmt.Errorf("trying to reuse blob %s at destination: %w", srcInfo.Digest, err)
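For context on the new `TOCDigest` plumbing: zstd:chunked layers advertise their table-of-contents digest in a blob annotation, and `chunkedToc.GetTOCDigest` extracts it. A hedged sketch — the literal annotation key below is our assumption of what the library looks for; real callers just pass through whatever annotations the source manifest carries:

```go
package main

import (
    "fmt"

    chunkedToc "github.com/containers/storage/pkg/chunked/toc"
    digest "github.com/opencontainers/go-digest"
)

func main() {
    tocValue := digest.Canonical.FromString("example TOC").String()
    annotations := map[string]string{
        // Assumed key emitted for zstd:chunked layers; non-chunked layers
        // simply lack it and GetTOCDigest returns (nil, nil).
        "io.github.containers.zstd-chunked.manifest-checksum": tocValue,
    }
    tocDigest, err := chunkedToc.GetTOCDigest(annotations)
    if err != nil {
        fmt.Println("invalid TOC annotation:", err)
        return
    }
    if tocDigest != nil {
        fmt.Println("chunked layer; destination may reuse by TOC digest:", *tocDigest)
    } else {
        fmt.Println("not a chunked layer")
    }
}
```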
vendor/github.com/containers/image/v5/internal/blobinfocache/types.go (generated, vendored; 2 changed lines)

@@ -36,7 +36,7 @@ type BlobInfoCache2 interface {
    // that could possibly be reused within the specified (transport scope) (if they still
    // exist, which is not guaranteed).
    //
    // If !canSubstitute, the returned cadidates will match the submitted digest exactly; if
    // If !canSubstitute, the returned candidates will match the submitted digest exactly; if
    // canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
    // up variants of the blob which have the same uncompressed digest.
    //
vendor/github.com/containers/image/v5/internal/imagedestination/wrapper.go (generated, vendored; 2 changed lines)

@@ -28,7 +28,7 @@ type wrapped struct {
//
// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
// with public methods, or perhaps a private interface), so that we can add methods
// without breaking any external implementors of a public interface.
// without breaking any external implementers of a public interface.
func FromPublic(dest types.ImageDestination) private.ImageDestination {
    if dest2, ok := dest.(private.ImageDestination); ok {
        return dest2
vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go (generated, vendored; 2 changed lines)

@@ -27,7 +27,7 @@ type wrapped struct {
//
// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
// with public methods, or perhaps a private interface), so that we can add methods
// without breaking any external implementors of a public interface.
// without breaking any external implementers of a public interface.
func FromPublic(src types.ImageSource) private.ImageSource {
    if src2, ok := src.(private.ImageSource); ok {
        return src2
vendor/github.com/containers/image/v5/internal/manifest/manifest.go (generated, vendored; 26 changed lines)

@@ -3,6 +3,7 @@ package manifest
import (
    "encoding/json"

    compressiontypes "github.com/containers/image/v5/pkg/compression/types"
    "github.com/containers/libtrust"
    digest "github.com/opencontainers/go-digest"
    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@@ -14,7 +15,7 @@ import (
const (
    // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
    DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
    // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature
    // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
    DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
    // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
    DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
@@ -165,3 +166,26 @@ func NormalizedMIMEType(input string) string {
        return DockerV2Schema1SignedMediaType
    }
}

// CompressionAlgorithmIsUniversallySupported returns true if MIMETypeSupportsCompressionAlgorithm(mimeType, algo) returns true for all mimeType values.
func CompressionAlgorithmIsUniversallySupported(algo compressiontypes.Algorithm) bool {
    switch algo.Name() { // Should this use InternalUnstableUndocumentedMIMEQuestionMark() ?
    case compressiontypes.GzipAlgorithmName:
        return true
    default:
        return false
    }
}

// MIMETypeSupportsCompressionAlgorithm returns true if mimeType can represent algo.
func MIMETypeSupportsCompressionAlgorithm(mimeType string, algo compressiontypes.Algorithm) bool {
    if CompressionAlgorithmIsUniversallySupported(algo) {
        return true
    }
    switch algo.Name() { // Should this use InternalUnstableUndocumentedMIMEQuestionMark() ?
    case compressiontypes.ZstdAlgorithmName, compressiontypes.ZstdChunkedAlgorithmName:
        return mimeType == imgspecv1.MediaTypeImageManifest
    default: // Includes Bzip2AlgorithmName and XzAlgorithmName, which are defined names but are not supported anywhere
        return false
    }
}
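A quick illustration of what the two new predicates decide. Note they live in an `internal` package, so outside c/image this is illustrative only; `compression.Gzip` and `compression.Zstd` are the exported algorithm values:

```go
package main

import (
    "fmt"

    internalManifest "github.com/containers/image/v5/internal/manifest"
    "github.com/containers/image/v5/pkg/compression"
    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
    // Gzip can be expressed by every manifest format...
    fmt.Println(internalManifest.CompressionAlgorithmIsUniversallySupported(compression.Gzip)) // true
    // ...while zstd is representable only in OCI image manifests.
    fmt.Println(internalManifest.MIMETypeSupportsCompressionAlgorithm(imgspecv1.MediaTypeImageManifest, compression.Zstd))          // true
    fmt.Println(internalManifest.MIMETypeSupportsCompressionAlgorithm(internalManifest.DockerV2Schema2MediaType, compression.Zstd)) // false
}
```

This is exactly the filter `determineManifestConversion` now applies: a user-requested zstd compression silently narrows the candidate manifest formats to OCI, and fails with the explicit errors shown above when no candidate survives.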
vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go (generated, vendored; 30 changed lines)

@@ -25,6 +25,7 @@ import (

    "github.com/containers/image/v5/types"
    imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/sirupsen/logrus"
    "golang.org/x/exp/slices"
)

@@ -82,15 +83,40 @@ func getCPUVariantWindows(arch string) string {
func getCPUVariantArm() string {
    variant, err := getCPUInfo("Cpu architecture")
    if err != nil {
        logrus.Errorf("Couldn't get cpu architecture: %v", err)
        return ""
    }
    // TODO handle RPi Zero mismatch (https://github.com/moby/moby/pull/36121#issuecomment-398328286)

    switch strings.ToLower(variant) {
    case "8", "aarch64":
        variant = "v8"
    case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
    case "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)":
        variant = "v7"
    case "7":
        // handle RPi Zero variant mismatch due to wrong variant from kernel
        // https://github.com/containerd/containerd/pull/4530
        // https://www.raspberrypi.org/forums/viewtopic.php?t=12614
        // https://github.com/moby/moby/pull/36121#issuecomment-398328286
        model, err := getCPUInfo("model name")
        if err != nil {
            logrus.Errorf("Couldn't get cpu model name, it may be the corner case where variant is 6: %v", err)
            return ""
        }
        // model name is NOT a value provided by the CPU; it is another outcome of Linux CPU detection,
        // https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v6.S#L178C35-L178C35
        // (matching happens based on value + mask at https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v6.S#L273-L274 )
        // ARM CPU ID starts with a “main” ID register https://developer.arm.com/documentation/ddi0406/cb/System-Level-Architecture/System-Control-Registers-in-a-VMSA-implementation/VMSA-System-control-registers-descriptions--in-register-order/MIDR--Main-ID-Register--VMSA?lang=en ,
        // but the ARMv6/ARMv7 differences are not a single dimension, https://developer.arm.com/documentation/ddi0406/cb/System-Level-Architecture/The-CPUID-Identification-Scheme?lang=en .
        // The Linux "cpu architecture" is determined by a “memory model” feature.
        //
        // So, the "armv6-compatible" check basically checks for a "v6 or v7 CPU, but not one found listed as a known v7 one in the .proc.info.init tables of
        // https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v7.S .
        if strings.HasPrefix(strings.ToLower(model), "armv6-compatible") {
            logrus.Debugf("Detected corner case, setting cpu variant to v6")
            variant = "v6"
        } else {
            variant = "v7"
        }
    case "6", "6tej":
        variant = "v6"
    case "5", "5t", "5te", "5tej":
vendor/github.com/containers/image/v5/internal/private/private.go (generated, vendored; 1 changed line)

@@ -117,6 +117,7 @@ type TryReusingBlobOptions struct {
    EmptyLayer bool            // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented.
    LayerIndex *int            // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise.
    SrcRef     reference.Named // A reference to the source image that contains the input blob.
    TOCDigest  *digest.Digest  // If specified, the blob can be looked up in the destination also by its TOC digest.
}

// ReusedBlob is information about a blob reused in a destination.
15
vendor/github.com/containers/image/v5/manifest/docker_schema1.go
generated
vendored
15
vendor/github.com/containers/image/v5/manifest/docker_schema1.go
generated
vendored
@ -10,6 +10,7 @@ import (
"github.com/containers/image/v5/docker/reference"
"github.com/containers/image/v5/internal/manifest"
"github.com/containers/image/v5/internal/set"
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/regexp"
"github.com/docker/docker/api/types/versions"
@ -142,6 +143,15 @@ func (m *Schema1) LayerInfos() []LayerInfo {
return layers
}

const fakeSchema1MIMEType = DockerV2Schema2LayerMediaType // Used only in schema1CompressionMIMETypeSets
var schema1CompressionMIMETypeSets = []compressionMIMETypeSet{
{
mtsUncompressed: fakeSchema1MIMEType,
compressiontypes.GzipAlgorithmName: fakeSchema1MIMEType,
compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
},
}

// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
// Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well.
@ -150,6 +160,11 @@ func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
}
m.FSLayers = make([]Schema1FSLayers, len(layerInfos))
for i, info := range layerInfos {
// There are no MIME types in schema1, but we do a “conversion” here to reject unsupported compression algorithms,
// in a way that is consistent with the other schema implementations.
if _, err := updatedMIMEType(schema1CompressionMIMETypeSets, fakeSchema1MIMEType, info); err != nil {
return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err)
}
// (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest,
// but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
// So, we don't bother recomputing the IDs in m.History.V1Compatibility.
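The schema1 change is worth a closer look: schema1 has no layer MIME types, so a placeholder type is pushed through the shared compression tables purely to reject unsupported algorithms. A reduced sketch of that idea with hypothetical names (updatedMIMEType and compressionMIMETypeSet are internal to the manifest package):

package main

import "fmt"

const (
	mtsUnsupported    = "!unsupported" // sentinel: the format cannot express this compression
	fakeLayerMIMEType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
)

// compressionSet maps a compression algorithm name to the MIME type that would
// represent it, or to a sentinel when the format cannot express it — the same
// shape as schema1CompressionMIMETypeSets above, reduced to a plain map.
var compressionSet = map[string]string{
	"":     fakeLayerMIMEType, // uncompressed
	"gzip": fakeLayerMIMEType,
	"zstd": mtsUnsupported,
}

// checkCompression rejects algorithms a schema1-like format cannot represent.
func checkCompression(algo string) error {
	mt, ok := compressionSet[algo]
	if !ok || mt == mtsUnsupported {
		return fmt.Errorf("compression %q is not supported by this manifest format", algo)
	}
	return nil
}

func main() {
	fmt.Println(checkCompression("gzip")) // <nil>
	fmt.Println(checkCompression("zstd")) // error
}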
2 vendor/github.com/containers/image/v5/manifest/manifest.go generated vendored
@ -16,7 +16,7 @@ import (
const (
// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
DockerV2Schema1MediaType = manifest.DockerV2Schema1MediaType
// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 with a JWS signature
// DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
DockerV2Schema1SignedMediaType = manifest.DockerV2Schema1SignedMediaType
// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
DockerV2Schema2MediaType = manifest.DockerV2Schema2MediaType
38 vendor/github.com/containers/image/v5/manifest/oci.go generated vendored
@ -9,6 +9,7 @@ import (
compressiontypes "github.com/containers/image/v5/pkg/compression/types"
"github.com/containers/image/v5/types"
ociencspec "github.com/containers/ocicrypt/spec"
chunkedToc "github.com/containers/storage/pkg/chunked/toc"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/specs-go"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@ -235,7 +236,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type
}

// ImageID computes an ID which can uniquely identify this image by its contents.
func (m *OCI1) ImageID([]digest.Digest) (string, error) {
func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) {
// The way m.Config.Digest “uniquely identifies” an image is
// by containing RootFS.DiffIDs, which identify the layers of the image.
// For non-image artifacts, we can’t expect the config to change
@ -259,9 +260,44 @@ func (m *OCI1) ImageID([]digest.Digest) (string, error) {
if err := m.Config.Digest.Validate(); err != nil {
return "", err
}

// If there is any layer that is using partial content, we calculate the image ID
// in a different way since the diffID cannot be validated as for regular pulled images.
for _, layer := range m.Layers {
toc, err := chunkedToc.GetTOCDigest(layer.Annotations)
if err != nil {
return "", fmt.Errorf("error looking up annotation for layer %q: %w", layer.Digest, err)
}
if toc != nil {
return m.calculateImageIDForPartialImage(diffIDs)
}
}

return m.Config.Digest.Hex(), nil
}

func (m *OCI1) calculateImageIDForPartialImage(diffIDs []digest.Digest) (string, error) {
newID := digest.Canonical.Digester()
for i, layer := range m.Layers {
diffID := diffIDs[i]
_, err := newID.Hash().Write([]byte(diffID.Hex()))
if err != nil {
return "", fmt.Errorf("error writing diffID %q: %w", diffID, err)
}
toc, err := chunkedToc.GetTOCDigest(layer.Annotations)
if err != nil {
return "", fmt.Errorf("error looking up annotation for layer %q: %w", layer.Digest, err)
}
if toc != nil {
_, err = newID.Hash().Write([]byte(toc.Hex()))
if err != nil {
return "", fmt.Errorf("error writing TOC %q: %w", toc, err)
}
}
}
return newID.Digest().Hex(), nil
}

// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
// (and the code can handle that).
// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
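The partial-image branch above derives the image ID from the layers rather than from the config digest. A standalone sketch of the same hashing scheme, using only github.com/opencontainers/go-digest; partialImageID and its inputs are illustrative, not the vendored method:

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// partialImageID mirrors the scheme above: hash each layer's diffID hex and,
// for layers that carry a TOC, the TOC digest hex as well.
func partialImageID(diffIDs []digest.Digest, tocs []*digest.Digest) string {
	d := digest.Canonical.Digester()
	for i, diffID := range diffIDs {
		d.Hash().Write([]byte(diffID.Hex()))
		if i < len(tocs) && tocs[i] != nil {
			d.Hash().Write([]byte(tocs[i].Hex()))
		}
	}
	return d.Digest().Hex()
}

func main() {
	a := digest.FromString("layer-a")   // stand-in diffID
	toc := digest.FromString("toc-a")   // stand-in TOC digest
	fmt.Println(partialImageID([]digest.Digest{a}, []*digest.Digest{&toc}))
}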
8 vendor/github.com/containers/image/v5/oci/archive/oci_dest.go generated vendored
@ -13,6 +13,7 @@ import (
"github.com/containers/image/v5/internal/signature"
"github.com/containers/image/v5/types"
"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
digest "github.com/opencontainers/go-digest"
"github.com/sirupsen/logrus"
)
@ -169,10 +170,15 @@ func (d *ociArchiveImageDestination) Commit(ctx context.Context, unparsedTopleve
// tar converts the directory at src and saves it to dst
func tarDirectory(src, dst string) error {
// input is a stream of bytes from the archive of the directory at path
input, err := archive.Tar(src, archive.Uncompressed)
input, err := archive.TarWithOptions(src, &archive.TarOptions{
Compression: archive.Uncompressed,
// Don’t include the data about the user account this code is running under.
ChownOpts: &idtools.IDPair{UID: 0, GID: 0},
})
if err != nil {
return fmt.Errorf("retrieving stream of bytes from %q: %w", src, err)
}
defer input.Close()

// creates the tar file
outFile, err := os.Create(dst)
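Since tarDirectory now goes through archive.TarWithOptions to force root ownership, a minimal usage sketch may help; tarDirTo and its destination writer are illustrative, while the archive and idtools calls are exactly the ones used above:

package main

import (
	"io"
	"os"

	"github.com/containers/storage/pkg/archive"
	"github.com/containers/storage/pkg/idtools"
)

// tarDirTo writes an uncompressed tar of src to w, forcing 0:0 ownership so
// the archive does not leak the invoking user's UID/GID (as in the hunk above).
func tarDirTo(src string, w io.Writer) error {
	rc, err := archive.TarWithOptions(src, &archive.TarOptions{
		Compression: archive.Uncompressed,
		ChownOpts:   &idtools.IDPair{UID: 0, GID: 0},
	})
	if err != nil {
		return err
	}
	defer rc.Close()
	_, err = io.Copy(w, rc)
	return err
}

func main() {
	_ = tarDirTo(os.TempDir(), io.Discard)
}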
@ -91,11 +91,11 @@ func min(a, b int) int {

// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with parameters for the
// number of entries to limit for known and unknown location separately, only to make testing simpler.
// TODO: following function is not destructive any more in the nature instead priortized result is actually copies of the original
// TODO: following function is not destructive any more in the nature instead prioritized result is actually copies of the original
// candidate set, so In future we might wanna re-name this public API and remove the destructive prefix.
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, totalLimit int, noLocationLimit int) []blobinfocache.BICReplacementCandidate2 {
// split unknown candidates and known candidates
// and limit them seperately.
// and limit them separately.
var knownLocationCandidates []CandidateWithTime
var unknownLocationCandidates []CandidateWithTime
// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
2 vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go generated vendored
@ -184,7 +184,7 @@ func (mem *cache) CandidateLocations(transport types.ImageTransport, scope types
// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations (if known) that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute,
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (mem *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
4 vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go generated vendored
@ -171,7 +171,7 @@ func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) {

// dbTransaction calls fn within a read-write transaction in db.
func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive dicussion.
// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion.

var zeroRes T // A zero value of T

@ -496,7 +496,7 @@ func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateW
// that could possibly be reused within the specified (transport scope) (if they still
// exist, which is not guaranteed).
//
// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
// up variants of the blob which have the same uncompressed digest.
//
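dbTransaction wraps fn in a single transaction and is generic over the result type. A minimal sketch of that general pattern against plain database/sql (this is the idiom, not the vendored implementation):

package main

import (
	"database/sql"
	"fmt"
)

// withTx is a minimal sketch of the dbTransaction pattern above: run fn inside
// one transaction, committing on success and rolling back on error.
func withTx[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
	var zero T // zero value of T, returned on any failure
	tx, err := db.Begin()
	if err != nil {
		return zero, fmt.Errorf("beginning transaction: %w", err)
	}
	res, err := fn(tx)
	if err != nil {
		_ = tx.Rollback()
		return zero, err
	}
	if err := tx.Commit(); err != nil {
		return zero, fmt.Errorf("committing: %w", err)
	}
	return res, nil
}

func main() { fmt.Println("withTx: see sketch above") }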
3 vendor/github.com/containers/image/v5/signature/fulcio_cert.go generated vendored
@ -1,3 +1,6 @@
//go:build !containers_image_fulcio_stub
// +build !containers_image_fulcio_stub

package signature

import (
28 vendor/github.com/containers/image/v5/signature/fulcio_cert_stub.go generated vendored Normal file
@ -0,0 +1,28 @@
//go:build containers_image_fulcio_stub
// +build containers_image_fulcio_stub

package signature

import (
"crypto"
"crypto/ecdsa"
"crypto/x509"
"errors"
)

type fulcioTrustRoot struct {
caCertificates *x509.CertPool
oidcIssuer string
subjectEmail string
}

func (f *fulcioTrustRoot) validate() error {
return errors.New("fulcio disabled at compile-time")
}

func verifyRekorFulcio(rekorPublicKey *ecdsa.PublicKey, fulcioTrustRoot *fulcioTrustRoot, untrustedRekorSET []byte,
untrustedCertificateBytes []byte, untrustedIntermediateChainBytes []byte, untrustedBase64Signature string,
untrustedPayloadBytes []byte) (crypto.PublicKey, error) {
return nil, errors.New("fulcio disabled at compile-time")

}
3 vendor/github.com/containers/image/v5/signature/internal/rekor_set.go generated vendored
@ -1,3 +1,6 @@
//go:build !containers_image_rekor_stub
// +build !containers_image_rekor_stub

package internal

import (
15 vendor/github.com/containers/image/v5/signature/internal/rekor_set_stub.go generated vendored Normal file
@ -0,0 +1,15 @@
//go:build containers_image_rekor_stub
// +build containers_image_rekor_stub

package internal

import (
"crypto/ecdsa"
"time"
)

// VerifyRekorSET verifies that unverifiedRekorSET is correctly signed by publicKey and matches the rest of the data.
// Returns bundle upload time on success.
func VerifyRekorSET(publicKey *ecdsa.PublicKey, unverifiedRekorSET []byte, unverifiedKeyOrCertBytes []byte, unverifiedBase64Signature string, unverifiedPayloadBytes []byte) (time.Time, error) {
return time.Time{}, NewInvalidSignatureError("rekor disabled at compile-time")
}
3 vendor/github.com/containers/image/v5/signature/sigstore/fulcio/fulcio.go generated vendored
@ -1,3 +1,6 @@
//go:build !containers_image_fulcio_stub
// +build !containers_image_fulcio_stub

package fulcio

import (
45 vendor/github.com/containers/image/v5/signature/sigstore/fulcio/fulcio_stub.go generated vendored Normal file
@ -0,0 +1,45 @@
//go:build containers_image_fulcio_stub
// +build containers_image_fulcio_stub

package fulcio

import (
"fmt"
"io"
"net/url"

"github.com/containers/image/v5/signature/sigstore/internal"
)

func WithFulcioAndPreexistingOIDCIDToken(fulcioURL *url.URL, oidcIDToken string) internal.Option {
return func(s *internal.SigstoreSigner) error {
return fmt.Errorf("fulcio disabled at compile time")
}
}

// WithFulcioAndDeviceAuthorizationGrantOIDC sets up signing to use a short-lived key and a Fulcio-issued certificate
// based on an OIDC ID token obtained using a device authorization grant (RFC 8628).
//
// interactiveOutput must be directly accessible to a human user in real time (i.e. not be just a log file).
func WithFulcioAndDeviceAuthorizationGrantOIDC(fulcioURL *url.URL, oidcIssuerURL *url.URL, oidcClientID, oidcClientSecret string,
interactiveOutput io.Writer) internal.Option {
return func(s *internal.SigstoreSigner) error {
return fmt.Errorf("fulcio disabled at compile time")
}
}

// WithFulcioAndInteractiveOIDC sets up signing to use a short-lived key and a Fulcio-issued certificate
// based on an interactively-obtained OIDC ID token.
// The token is obtained
// - directly using a browser, listening on localhost, automatically opening a browser to the OIDC issuer,
// to be redirected on localhost. (I.e. the current environment must allow launching a browser that connects back to the current process;
// either or both may be impossible in a container or a remote VM).
// - or by instructing the user to manually open a browser, obtain the OIDC code, and interactively input it as text.
//
// interactiveInput and interactiveOutput must both be directly operable by a human user in real time (i.e. not be just a log file).
func WithFulcioAndInteractiveOIDC(fulcioURL *url.URL, oidcIssuerURL *url.URL, oidcClientID, oidcClientSecret string,
interactiveInput io.Reader, interactiveOutput io.Writer) internal.Option {
return func(s *internal.SigstoreSigner) error {
return fmt.Errorf("fulcio disabled at compile time")
}
}
3 vendor/github.com/containers/image/v5/signature/sigstore/rekor/rekor.go generated vendored
@ -1,3 +1,6 @@
//go:build !containers_image_rekor_stub
// +build !containers_image_rekor_stub

package rekor

import (
17 vendor/github.com/containers/image/v5/signature/sigstore/rekor/rekor_stub.go generated vendored Normal file
@ -0,0 +1,17 @@
//go:build containers_image_rekor_stub
// +build containers_image_rekor_stub

package rekor

import (
"fmt"
"net/url"

signerInternal "github.com/containers/image/v5/signature/sigstore/internal"
)

func WithRekor(rekorURL *url.URL) signerInternal.Option {
return func(s *signerInternal.SigstoreSigner) error {
return fmt.Errorf("rekor disabled at build time")
}
}
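All of the _stub files above follow one pattern: a real implementation guarded by !tag and a stub guarded by tag, so Fulcio/Rekor support can be compiled out. A generic two-file sketch of that pattern, with a hypothetical tag name (mymodule_feature_stub):

// feature.go — compiled by default.
//go:build !mymodule_feature_stub

package feature

// Enabled reports whether the full implementation was compiled in.
func Enabled() bool { return true }

// feature_stub.go — selected with: go build -tags mymodule_feature_stub
//go:build mymodule_feature_stub

package feature

// Enabled reports whether the full implementation was compiled in.
func Enabled() bool { return false }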
209 vendor/github.com/containers/image/v5/storage/storage_dest.go generated vendored
@ -77,13 +77,13 @@ type storageImageDestination struct {
indexToStorageID map[int]*string
// All accesses to below data are protected by `lock` which is made
// *explicit* in the code.
blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image
blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
uncompressedOrTocDigest map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs or TOC IDs.
fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes
filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them
currentIndex int // The index of the layer to be committed (i.e., lower indices have already been committed)
indexToAddedLayerInfo map[int]addedLayerInfo // Mapping from layer (by index) to blob to add to the image
blobAdditionalLayer map[digest.Digest]storage.AdditionalLayer // Mapping from layer blobsums to their corresponding additional layer
diffOutputs map[digest.Digest]*graphdriver.DriverWithDifferOutput // Mapping from digest to differ output
}

// addedLayerInfo records data about a layer to use in this image.
@ -117,18 +117,18 @@ func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*
HasThreadSafePutBlob: true,
}),

imageRef: imageRef,
directory: directory,
signatureses: make(map[digest.Digest][]byte),
blobDiffIDs: make(map[digest.Digest]digest.Digest),
blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
fileSizes: make(map[digest.Digest]int64),
filenames: make(map[digest.Digest]string),
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
indexToStorageID: make(map[int]*string),
indexToAddedLayerInfo: make(map[int]addedLayerInfo),
diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
imageRef: imageRef,
directory: directory,
signatureses: make(map[digest.Digest][]byte),
uncompressedOrTocDigest: make(map[digest.Digest]digest.Digest),
blobAdditionalLayer: make(map[digest.Digest]storage.AdditionalLayer),
fileSizes: make(map[digest.Digest]int64),
filenames: make(map[digest.Digest]string),
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
indexToStorageID: make(map[int]*string),
indexToAddedLayerInfo: make(map[int]addedLayerInfo),
diffOutputs: make(map[digest.Digest]*graphdriver.DriverWithDifferOutput),
}
dest.Compat = impl.AddCompat(dest)
return dest, nil
@ -227,7 +227,7 @@ func (s *storageImageDestination) putBlobToPendingFile(stream io.Reader, blobinf

// Record information about the blob.
s.lock.Lock()
s.blobDiffIDs[blobDigest] = diffID.Digest()
s.uncompressedOrTocDigest[blobDigest] = diffID.Digest()
s.fileSizes[blobDigest] = counter.Count
s.filenames[blobDigest] = filename
s.lock.Unlock()
@ -276,7 +276,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
blobInfo: srcInfo,
}

differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Size, srcInfo.Annotations, &fetcher)
differ, err := chunked.GetDiffer(ctx, s.imageRef.transport.store, srcInfo.Digest, srcInfo.Size, srcInfo.Annotations, &fetcher)
if err != nil {
return private.UploadedBlob{}, err
}
@ -289,7 +289,7 @@ func (s *storageImageDestination) PutBlobPartial(ctx context.Context, chunkAcces
blobDigest := srcInfo.Digest

s.lock.Lock()
s.blobDiffIDs[blobDigest] = blobDigest
s.uncompressedOrTocDigest[blobDigest] = blobDigest
s.fileSizes[blobDigest] = 0
s.filenames[blobDigest] = ""
s.diffOutputs[blobDigest] = out
@ -321,7 +321,7 @@ func (s *storageImageDestination) TryReusingBlobWithOptions(ctx context.Context,
})
}

// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.blobDiffIDs and other metadata.
// tryReusingBlobAsPending implements TryReusingBlobWithOptions for (digest, size or -1), filling s.uncompressedOrTocDigest and other metadata.
// The caller must arrange the blob to be eventually committed using s.commitLayer().
func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest, size int64, options *private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) {
// lock the entire method as it executes fairly quickly
@ -335,7 +335,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
return false, private.ReusedBlob{}, fmt.Errorf(`looking for compressed layers with digest %q and labels: %w`, digest, err)
} else if err == nil {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.blobDiffIDs[digest] = aLayer.UncompressedDigest()
s.uncompressedOrTocDigest[digest] = aLayer.UncompressedDigest()
s.blobAdditionalLayer[digest] = aLayer
return true, private.ReusedBlob{
Digest: digest,
@ -366,7 +366,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
}
if len(layers) > 0 {
// Save this for completeness.
s.blobDiffIDs[digest] = layers[0].UncompressedDigest
s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: digest,
Size: layers[0].UncompressedSize,
@ -380,7 +380,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
}
if len(layers) > 0 {
// Record the uncompressed value so that we can use it to calculate layer IDs.
s.blobDiffIDs[digest] = layers[0].UncompressedDigest
s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: digest,
Size: layers[0].CompressedSize,
@ -398,7 +398,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
}
if len(layers) > 0 {
if size != -1 {
s.blobDiffIDs[digest] = layers[0].UncompressedDigest
s.uncompressedOrTocDigest[digest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: digest,
Size: size,
@ -407,7 +407,7 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
if !options.CanSubstitute {
return false, private.ReusedBlob{}, fmt.Errorf("Internal error: options.CanSubstitute was expected to be true for blob with digest %s", digest)
}
s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
s.uncompressedOrTocDigest[uncompressedDigest] = layers[0].UncompressedDigest
return true, private.ReusedBlob{
Digest: uncompressedDigest,
Size: layers[0].UncompressedSize,
@ -416,6 +416,25 @@ func (s *storageImageDestination) tryReusingBlobAsPending(digest digest.Digest,
}
}

tocDigest := digest
if options.TOCDigest != nil {
tocDigest = *options.TOCDigest
}

// Check if we have a chunked layer in storage with the same TOC digest.
layers, err = s.imageRef.transport.store.LayersByTOCDigest(tocDigest)
if err != nil && !errors.Is(err, storage.ErrLayerUnknown) {
return false, private.ReusedBlob{}, fmt.Errorf(`looking for layers with TOC digest %q: %w`, tocDigest, err)
}
if len(layers) > 0 {
// Save this for completeness.
s.uncompressedOrTocDigest[digest] = layers[0].TOCDigest
return true, private.ReusedBlob{
Digest: layers[0].TOCDigest,
Size: layers[0].UncompressedSize,
}, nil
}

// Nope, we don't have it.
return false, private.ReusedBlob{}, nil
}
@ -438,16 +457,20 @@ func (s *storageImageDestination) computeID(m manifest.Manifest) string {
continue
}
blobSum := m.FSLayers[i].BlobSum
diffID, ok := s.blobDiffIDs[blobSum]
diffID, ok := s.uncompressedOrTocDigest[blobSum]
if !ok {
logrus.Infof("error looking up diffID for layer %q", blobSum.String())
return ""
}
diffIDs = append([]digest.Digest{diffID}, diffIDs...)
}
case *manifest.Schema2, *manifest.OCI1:
// We know the ID calculation for these formats doesn't actually use the diffIDs,
// so we don't need to populate the diffID list.
case *manifest.Schema2:
// We know the ID calculation doesn't actually use the diffIDs, so we don't need to populate
// the diffID list.
case *manifest.OCI1:
for _, l := range m.Layers {
diffIDs = append(diffIDs, l.Digest)
}
default:
return ""
}
@ -518,7 +541,7 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
}
s.lock.Unlock()
// Note: commitLayer locks on-demand.
if err := s.commitLayer(index, info, -1); err != nil {
if stopQueue, err := s.commitLayer(index, info, -1); stopQueue || err != nil {
return err
}
s.lock.Lock()
@ -532,18 +555,32 @@ func (s *storageImageDestination) queueOrCommit(index int, info addedLayerInfo)
return nil
}

// getDiffIDOrTOCDigest returns the diffID for the specified digest or the digest for the TOC, if known.
func (s *storageImageDestination) getDiffIDOrTOCDigest(uncompressedDigest digest.Digest) (digest.Digest, bool) {
s.lock.Lock()
defer s.lock.Unlock()

if d, found := s.diffOutputs[uncompressedDigest]; found {
return d.TOCDigest, found
}
d, found := s.uncompressedOrTocDigest[uncompressedDigest]
return d, found
}

// commitLayer commits the specified layer with the given index to the storage.
// size can usually be -1; it can be provided if the layer is not known to be already present in blobDiffIDs.
// size can usually be -1; it can be provided if the layer is not known to be already present in uncompressedOrTocDigest.
//
// If the layer cannot be committed yet, the function returns (true, nil).
//
// Note that the previous layer is expected to already be committed.
//
// Caution: this function must be called without holding `s.lock`. Callers
// must guarantee that, at any given time, at most one goroutine may execute
// `commitLayer()`.
func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) error {
func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, size int64) (bool, error) {
// Already committed? Return early.
if _, alreadyCommitted := s.indexToStorageID[index]; alreadyCommitted {
return nil
return false, nil
}

// Start with an empty string or the previous layer ID. Note that
@ -557,68 +594,96 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
// Carry over the previous ID for empty non-base layers.
if info.emptyLayer {
s.indexToStorageID[index] = &lastLayer
return nil
return false, nil
}

// Check if there's already a layer with the ID that we'd give to the result of applying
// this layer blob to its parent, if it has one, or the blob's hex value otherwise.
s.lock.Lock()
diffID, haveDiffID := s.blobDiffIDs[info.digest]
s.lock.Unlock()
if !haveDiffID {
// The diffIDOrTOCDigest refers either to the DiffID or the digest of the TOC.
diffIDOrTOCDigest, haveDiffIDOrTOCDigest := s.getDiffIDOrTOCDigest(info.digest)
if !haveDiffIDOrTOCDigest {
// Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
// or to even check if we had it.
// Use none.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
// that relies on using a blob digest that has never been seen by the store had better call
// TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
// so far we are going to accommodate that (if we should be doing that at all).
logrus.Debugf("looking for diffID for blob %+v", info.digest)
logrus.Debugf("looking for diffID or TOC digest for blob %+v", info.digest)
// Use tryReusingBlobAsPending, not the top-level TryReusingBlobWithOptions, to prevent recursion via queueOrCommit.
has, _, err := s.tryReusingBlobAsPending(info.digest, size, &private.TryReusingBlobOptions{
Cache: none.NoCache,
CanSubstitute: false,
})
if err != nil {
return fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err)
return false, fmt.Errorf("checking for a layer based on blob %q: %w", info.digest.String(), err)
}
if !has {
return fmt.Errorf("error determining uncompressed digest for blob %q", info.digest.String())
return false, fmt.Errorf("error determining uncompressed digest or TOC digest for blob %q", info.digest.String())
}
diffID, haveDiffID = s.blobDiffIDs[info.digest]
if !haveDiffID {
return fmt.Errorf("we have blob %q, but don't know its uncompressed digest", info.digest.String())
diffIDOrTOCDigest, haveDiffIDOrTOCDigest = s.getDiffIDOrTOCDigest(info.digest)
if !haveDiffIDOrTOCDigest {
return false, fmt.Errorf("we have blob %q, but don't know its uncompressed or TOC digest", info.digest.String())
}
}
id := diffID.Hex()
id := diffIDOrTOCDigest.Hex()
if lastLayer != "" {
id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex()
id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffIDOrTOCDigest.Hex())).Hex()
}
if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil {
// There's already a layer that should have the right contents, just reuse it.
lastLayer = layer.ID
s.indexToStorageID[index] = &lastLayer
return nil
return false, nil
}

s.lock.Lock()
diffOutput, ok := s.diffOutputs[info.digest]
s.lock.Unlock()
if ok {
layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
if err != nil {
return err
if s.manifest == nil {
logrus.Debugf("Skipping commit for TOC=%q, manifest not yet available", id)
return true, nil
}

// FIXME: what to do with the uncompressed digest?
diffOutput.UncompressedDigest = info.digest
man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest))
if err != nil {
return false, fmt.Errorf("parsing manifest: %w", err)
}

if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, nil); err != nil {
cb, err := s.getConfigBlob(man.ConfigInfo())
if err != nil {
return false, err
}

// retrieve the expected uncompressed digest from the config blob.
configOCI := &imgspecv1.Image{}
if err := json.Unmarshal(cb, configOCI); err != nil {
return false, err
}
if index >= len(configOCI.RootFS.DiffIDs) {
return false, fmt.Errorf("index %d out of range for configOCI.RootFS.DiffIDs", index)
}

layer, err := s.imageRef.transport.store.CreateLayer(id, lastLayer, nil, "", false, nil)
if err != nil {
return false, err
}

// let the storage layer know what was the original uncompressed layer.
flags := make(map[string]interface{})
flags[expectedLayerDiffIDFlag] = configOCI.RootFS.DiffIDs[index]
logrus.Debugf("Setting uncompressed digest to %q for layer %q", configOCI.RootFS.DiffIDs[index], id)
options := &graphdriver.ApplyDiffWithDifferOpts{
Flags: flags,
}

if err := s.imageRef.transport.store.ApplyDiffFromStagingDirectory(layer.ID, diffOutput.Target, diffOutput, options); err != nil {
_ = s.imageRef.transport.store.Delete(layer.ID)
return err
return false, err
}

s.indexToStorageID[index] = &layer.ID
return nil
return false, nil
}

s.lock.Lock()
@ -627,11 +692,11 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
if ok {
layer, err := al.PutAs(id, lastLayer, nil)
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
return fmt.Errorf("failed to put layer from digest and labels: %w", err)
return false, fmt.Errorf("failed to put layer from digest and labels: %w", err)
}
lastLayer = layer.ID
s.indexToStorageID[index] = &lastLayer
return nil
return false, nil
}

// Check if we previously cached a file with that blob's contents. If we didn't,
@ -642,7 +707,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
if !ok {
// Try to find the layer with contents matching that blobsum.
layer := ""
layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffID)
layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(diffIDOrTOCDigest)
if err2 == nil && len(layers) > 0 {
layer = layers[0].ID
} else {
@ -652,7 +717,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
}
}
if layer == "" {
return fmt.Errorf("locating layer for blob %q: %w", info.digest, err2)
return false, fmt.Errorf("locating layer for blob %q: %w", info.digest, err2)
}
// Read the layer's contents.
noCompression := archive.Uncompressed
@ -661,17 +726,17 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
}
diff, err2 := s.imageRef.transport.store.Diff("", layer, diffOptions)
if err2 != nil {
return fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2)
return false, fmt.Errorf("reading layer %q for blob %q: %w", layer, info.digest, err2)
}
// Copy the layer diff to a file. Diff() takes a lock that it holds
// until the ReadCloser that it returns is closed, and PutLayer() wants
// the same lock, so the diff can't just be directly streamed from one
// to the other.
filename = s.computeNextBlobCacheFile()
file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600)
file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0o600)
if err != nil {
diff.Close()
return fmt.Errorf("creating temporary file %q: %w", filename, err)
return false, fmt.Errorf("creating temporary file %q: %w", filename, err)
}
// Copy the data to the file.
// TODO: This can take quite some time, and should ideally be cancellable using
@ -680,7 +745,7 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
diff.Close()
file.Close()
if err != nil {
return fmt.Errorf("storing blob to file %q: %w", filename, err)
return false, fmt.Errorf("storing blob to file %q: %w", filename, err)
}
// Make sure that we can find this file later, should we need the layer's
// contents again.
@ -691,21 +756,21 @@ func (s *storageImageDestination) commitLayer(index int, info addedLayerInfo, si
// Read the cached blob and use it as a diff.
file, err := os.Open(filename)
if err != nil {
return fmt.Errorf("opening file %q: %w", filename, err)
return false, fmt.Errorf("opening file %q: %w", filename, err)
}
defer file.Close()
// Build the new layer using the diff, regardless of where it came from.
// TODO: This can take quite some time, and should ideally be cancellable using ctx.Done().
layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, &storage.LayerOptions{
OriginalDigest: info.digest,
UncompressedDigest: diffID,
UncompressedDigest: diffIDOrTOCDigest,
}, file)
if err != nil && !errors.Is(err, storage.ErrDuplicateID) {
return fmt.Errorf("adding layer with blob %q: %w", info.digest, err)
return false, fmt.Errorf("adding layer with blob %q: %w", info.digest, err)
}

s.indexToStorageID[index] = &layer.ID
return nil
return false, nil
}

// Commit marks the process of storing the image as successful and asks for the image to be persisted.
@ -752,11 +817,13 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t

// Extract, commit, or find the layers.
for i, blob := range layerBlobs {
if err := s.commitLayer(i, addedLayerInfo{
if stopQueue, err := s.commitLayer(i, addedLayerInfo{
digest: blob.Digest,
emptyLayer: blob.EmptyLayer,
}, blob.Size); err != nil {
return err
} else if stopQueue {
return fmt.Errorf("Internal error: storageImageDestination.Commit(): commitLayer() not ready to commit for layer %q", blob.Digest)
}
}
var lastLayer string
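Note the changed contract running through this file: commitLayer now returns (stopQueue bool, err error), where (true, nil) means a TOC-based layer cannot be committed until the manifest is available and should be retried later. A reduced sketch of the caller-side handling; commitLayer here is a stand-in, and haveManifest plays the role of s.manifest being set:

package main

import "fmt"

// commitLayer mimics the new contract above: (true, nil) means "not ready yet,
// requeue me"; the manifest gate stands in for s.manifest being available.
func commitLayer(haveManifest bool) (stopQueue bool, err error) {
	if !haveManifest {
		return true, nil // cannot derive the expected diffID yet
	}
	return false, nil
}

func main() {
	if stop, err := commitLayer(false); err != nil {
		fmt.Println("error:", err)
	} else if stop {
		fmt.Println("deferred: manifest not yet available")
	}
}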
89 vendor/github.com/containers/image/v5/storage/storage_src.go generated vendored
@ -29,21 +29,33 @@ import (
"github.com/sirupsen/logrus"
)

// getBlobMutexProtected is a struct to hold the state of the getBlobMutex mutex.
type getBlobMutexProtected struct {
// digestToLayerID is a lookup map from the layer digest (either the uncompressed digest or the TOC digest) to the
// layer ID in the store.
digestToLayerID map[digest.Digest]string

// layerPosition stores where we are in reading a blob's layers
layerPosition map[digest.Digest]int
}

type storageImageSource struct {
impl.Compat
impl.PropertyMethodsInitialize
stubs.NoGetBlobAtInitialize

imageRef storageReference
image *storage.Image
systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
layerPosition map[digest.Digest]int // Where we are in reading a blob's layers
cachedManifest []byte // A cached copy of the manifest, if already known, or nil
getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions
SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice
imageRef storageReference
image *storage.Image
systemContext *types.SystemContext // SystemContext used in GetBlob() to create temporary files
cachedManifest []byte // A cached copy of the manifest, if already known, or nil
getBlobMutex sync.Mutex // Mutex to sync state for parallel GetBlob executions (it guards layerPosition and digestToLayerID)
getBlobMutexProtected getBlobMutexProtected
SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice
SignaturesSizes map[digest.Digest][]int `json:"signatures-sizes,omitempty"` // List of sizes of each signature slice
}

const expectedLayerDiffIDFlag = "expected-layer-diffid"

// newImageSource sets up an image for reading.
func newImageSource(sys *types.SystemContext, imageRef storageReference) (*storageImageSource, error) {
// First, locate the image.
@ -62,9 +74,12 @@ func newImageSource(sys *types.SystemContext, imageRef storageReference) (*stora
imageRef: imageRef,
systemContext: sys,
image: img,
layerPosition: make(map[digest.Digest]int),
SignatureSizes: []int{},
SignaturesSizes: make(map[digest.Digest][]int),
getBlobMutexProtected: getBlobMutexProtected{
digestToLayerID: make(map[digest.Digest]string),
layerPosition: make(map[digest.Digest]int),
},
}
image.Compat = impl.AddCompat(image)
if img.Metadata != "" {
@ -91,6 +106,7 @@ func (s *storageImageSource) Close() error {
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
// We need a valid digest value.
digest := info.Digest

err = digest.Validate()
if err != nil {
return nil, 0, err
@ -100,10 +116,24 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
return io.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
}

// Check if the blob corresponds to a diff that was used to initialize any layers. Our
// callers should try to retrieve layers using their uncompressed digests, so no need to
// check if they're using one of the compressed digests, which we can't reproduce anyway.
layers, _ := s.imageRef.transport.store.LayersByUncompressedDigest(digest)
var layers []storage.Layer

// If the digest was overridden by LayerInfosForCopy, then we need to use the TOC digest
// to retrieve it from the storage.
s.getBlobMutex.Lock()
layerID, found := s.getBlobMutexProtected.digestToLayerID[digest]
s.getBlobMutex.Unlock()

if found {
if layer, err := s.imageRef.transport.store.Layer(layerID); err == nil {
layers = []storage.Layer{*layer}
}
} else {
// Check if the blob corresponds to a diff that was used to initialize any layers. Our
// callers should try to retrieve layers using their uncompressed digests, so no need to
// check if they're using one of the compressed digests, which we can't reproduce anyway.
layers, _ = s.imageRef.transport.store.LayersByUncompressedDigest(digest)
}

// If it's not a layer, then it must be a data item.
if len(layers) == 0 {
@ -174,8 +204,8 @@ func (s *storageImageSource) getBlobAndLayerID(digest digest.Digest, layers []st
// which claim to have the same contents, that we actually do have multiple layers, otherwise we could
// just go ahead and use the first one every time.
s.getBlobMutex.Lock()
i := s.layerPosition[digest]
s.layerPosition[digest] = i + 1
i := s.getBlobMutexProtected.layerPosition[digest]
s.getBlobMutexProtected.layerPosition[digest] = i + 1
s.getBlobMutex.Unlock()
if len(layers) > 0 {
layer = layers[i%len(layers)]
@ -267,14 +297,35 @@ func (s *storageImageSource) LayerInfosForCopy(ctx context.Context, instanceDige
if err != nil {
return nil, fmt.Errorf("reading layer %q in image %q: %w", layerID, s.image.ID, err)
}
if layer.UncompressedDigest == "" {
return nil, fmt.Errorf("uncompressed digest for layer %q is unknown", layerID)
if layer.UncompressedDigest == "" && layer.TOCDigest == "" {
return nil, fmt.Errorf("uncompressed digest and TOC digest for layer %q is unknown", layerID)
}
if layer.UncompressedSize < 0 {
return nil, fmt.Errorf("uncompressed size for layer %q is unknown", layerID)
}

blobDigest := layer.UncompressedDigest

if layer.TOCDigest != "" {
if layer.Flags == nil || layer.Flags[expectedLayerDiffIDFlag] == nil {
return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not set", layer.TOCDigest, layerID, expectedLayerDiffIDFlag)
}
if expectedDigest, ok := layer.Flags[expectedLayerDiffIDFlag].(string); ok {
// if the layer is stored by its TOC, report the expected diffID as the layer Digest
// but store the TOC digest so we can later retrieve it from the storage.
blobDigest, err = digest.Parse(expectedDigest)
if err != nil {
return nil, fmt.Errorf("parsing expected diffID %q for layer %q: %w", expectedDigest, layerID, err)
}
} else {
return nil, fmt.Errorf("TOC digest %q for layer %q is present but %q flag is not a string", layer.TOCDigest, layerID, expectedLayerDiffIDFlag)
}
}
s.getBlobMutex.Lock()
s.getBlobMutexProtected.digestToLayerID[blobDigest] = layer.ID
s.getBlobMutex.Unlock()
blobInfo := types.BlobInfo{
Digest: layer.UncompressedDigest,
Digest: blobDigest,
Size: layer.UncompressedSize,
MediaType: uncompressedLayerType,
}
@ -384,7 +435,7 @@ func (s *storageImageSource) getSize() (int64, error) {
if err != nil {
return -1, err
}
if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 {
if (layer.TOCDigest == "" && layer.UncompressedDigest == "") || layer.UncompressedSize < 0 {
return -1, fmt.Errorf("size for layer %q is unknown, failing getSize()", layerID)
}
sum += layer.UncompressedSize
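Moving layerPosition and digestToLayerID into getBlobMutexProtected makes the mutex's coverage explicit in the type itself. The same idiom in miniature (all names here are illustrative):

package main

import (
	"fmt"
	"sync"
)

// guarded groups all state owned by mu, so every access point makes the
// locking discipline visible — the idiom used by getBlobMutexProtected above.
type guarded struct {
	positions map[string]int
}

type source struct {
	mu        sync.Mutex // guards everything inside protected
	protected guarded
}

func (s *source) nextPosition(key string) int {
	s.mu.Lock()
	defer s.mu.Unlock()
	i := s.protected.positions[key]
	s.protected.positions[key] = i + 1
	return i
}

func main() {
	s := &source{protected: guarded{positions: map[string]int{}}}
	fmt.Println(s.nextPosition("layer"), s.nextPosition("layer")) // 0 1
}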
2 vendor/github.com/containers/image/v5/storage/storage_transport.go generated vendored
@ -213,7 +213,7 @@ func (s *storageTransport) GetStore() (storage.Store, error) {
// Return the transport's previously-set store. If we don't have one
// of those, initialize one now.
if s.store == nil {
options, err := storage.DefaultStoreOptionsAutoDetectUID()
options, err := storage.DefaultStoreOptions()
if err != nil {
return nil, err
}
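For consumers, the visible change in this hunk is that store options now come from storage.DefaultStoreOptions(), which appears to subsume the old UID auto-detection. A hedged usage sketch; the c/storage calls are the ones used in this diff, the error handling is illustrative:

package main

import (
	"fmt"

	"github.com/containers/storage"
)

func main() {
	// DefaultStoreOptions (c/storage >= 1.52) replaces the older
	// DefaultStoreOptionsAutoDetectUID shown on the removed line above.
	options, err := storage.DefaultStoreOptions()
	if err != nil {
		fmt.Println("options:", err)
		return
	}
	store, err := storage.GetStore(options)
	if err != nil {
		fmt.Println("store:", err)
		return
	}
	defer store.Shutdown(false)
	fmt.Println("graph driver:", store.GraphDriverName())
}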
4 vendor/github.com/containers/image/v5/version/version.go generated vendored
@ -8,10 +8,10 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 29
// VersionPatch is for backwards-compatible bug fixes
VersionPatch = 1
VersionPatch = 2

// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
VersionDev = "-dev"
)

// Version is the specification version that the package types support.
8 vendor/github.com/containers/storage/.cirrus.yml generated vendored
@ -17,13 +17,13 @@ env:
####
#### Cache-image names to test with (double-quotes around names are critical)
###
FEDORA_NAME: "fedora-39β"
FEDORA_NAME: "fedora-39"
DEBIAN_NAME: "debian-13"

# GCE project where images live
IMAGE_PROJECT: "libpod-218412"
# VM Image built in containers/automation_images
IMAGE_SUFFIX: "c20231004t194547z-f39f38d13"
IMAGE_SUFFIX: "c20231208t193858z-f39f38d13"
FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"

@ -167,7 +167,7 @@ vendor_task:

cross_task:
container:
image: golang:1.19
image: golang:1.20
build_script: make cross


@ -181,6 +181,6 @@ success_task:
- vendor
- cross
container:
image: golang:1.19
image: golang:1.20
clone_script: 'mkdir -p "$CIRRUS_WORKING_DIR"' # Source code not needed
script: /bin/true
vendor/github.com/containers/storage/VERSION
generated
vendored
2
vendor/github.com/containers/storage/VERSION
generated
vendored
@ -1 +1 @@
|
||||
1.51.0
|
||||
1.52.0
|
||||
|
11 vendor/github.com/containers/storage/drivers/driver.go generated vendored
@ -73,6 +73,13 @@ type ApplyDiffOpts struct {
ForceMask *os.FileMode
}

// ApplyDiffWithDifferOpts contains optional arguments for ApplyDiffWithDiffer methods.
type ApplyDiffWithDifferOpts struct {
ApplyDiffOpts

Flags map[string]interface{}
}

// InitFunc initializes the storage driver.
type InitFunc func(homedir string, options Options) (Driver, error)

@ -223,9 +230,9 @@ type DriverWithDiffer interface {
Driver
// ApplyDiffWithDiffer applies the changes using the callback function.
// If id is empty, then a staging directory is created. The staging directory is guaranteed to be usable with ApplyDiffFromStagingDirectory.
ApplyDiffWithDiffer(id, parent string, options *ApplyDiffOpts, differ Differ) (output DriverWithDifferOutput, err error)
ApplyDiffWithDiffer(id, parent string, options *ApplyDiffWithDifferOpts, differ Differ) (output DriverWithDifferOutput, err error)
// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *DriverWithDifferOutput, options *ApplyDiffOpts) error
ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *DriverWithDifferOutput, options *ApplyDiffWithDifferOpts) error
// CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors
CleanupStagingDirectory(stagingDirectory string) error
// DifferTarget gets the location where files are stored for the layer.
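ApplyDiffWithDifferOpts embeds ApplyDiffOpts and adds a free-form Flags map; this commit uses it to pass the expected layer diffID to the graph driver. An illustrative construction — the flag key matches expectedLayerDiffIDFlag from storage_src.go above, and the digest value is a placeholder:

package main

import (
	"fmt"

	graphdriver "github.com/containers/storage/drivers"
)

func main() {
	// "expected-layer-diffid" is the key this commit stores the expected
	// diffID under; "sha256:example" stands in for a real digest string.
	opts := &graphdriver.ApplyDiffWithDifferOpts{
		Flags: map[string]interface{}{
			"expected-layer-diffid": "sha256:example",
		},
	}
	fmt.Println(opts.Flags)
}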
@ -1,5 +1,5 @@
//go:build linux && composefs && cgo
// +build linux,composefs,cgo
//go:build linux && cgo
// +build linux,cgo

package overlay

@ -34,11 +34,6 @@ func getComposeFsHelper() (string, error) {
return composeFsHelperPath, composeFsHelperErr
}

func composeFsSupported() bool {
_, err := getComposeFsHelper()
return err == nil
}

func enableVerity(description string, fd int) error {
enableArg := unix.FsverityEnableArg{
Version: 1,
24 vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go generated vendored
@ -1,24 +0,0 @@
//go:build !linux || !composefs || !cgo
// +build !linux !composefs !cgo

package overlay

import (
"fmt"
)

func composeFsSupported() bool {
return false
}

func generateComposeFsBlob(verityDigests map[string]string, toc interface{}, composefsDir string) error {
return fmt.Errorf("composefs is not supported")
}

func mountComposefsBlob(dataDir, mountPoint string) error {
return fmt.Errorf("composefs is not supported")
}

func enableVerityRecursive(path string) (map[string]string, error) {
return nil, fmt.Errorf("composefs is not supported")
}
60 vendor/github.com/containers/storage/drivers/overlay/overlay.go generated vendored
@ -105,6 +105,7 @@ type overlayOptions struct {
|
||||
mountOptions string
|
||||
ignoreChownErrors bool
|
||||
forceMask *os.FileMode
|
||||
useComposefs bool
|
||||
}
|
||||
|
||||
// Driver contains information about the home directory and the list of active mounts that are created using this driver.
|
||||
@ -122,6 +123,7 @@ type Driver struct {
|
||||
supportsDType bool
|
||||
supportsVolatile *bool
usingMetacopy bool
usingComposefs bool

supportsIDMappedMounts *bool
}
@ -387,6 +389,22 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
}
}

if opts.useComposefs {
if unshare.IsRootless() {
return nil, fmt.Errorf("composefs is not supported in user namespaces")
}
supportsDataOnly, err := supportsDataOnlyLayersCached(home, runhome)
if err != nil {
return nil, err
}
if !supportsDataOnly {
return nil, fmt.Errorf("composefs is not supported on this kernel: %w", graphdriver.ErrIncompatibleFS)
}
if _, err := getComposeFsHelper(); err != nil {
return nil, fmt.Errorf("composefs helper program not found: %w", err)
}
}

var usingMetacopy bool
var supportsDType bool
var supportsVolatile *bool
@ -448,6 +466,7 @@ func Init(home string, options graphdriver.Options) (graphdriver.Driver, error)
supportsDType: supportsDType,
usingMetacopy: usingMetacopy,
supportsVolatile: supportsVolatile,
usingComposefs: opts.useComposefs,
options: *opts,
}

@ -555,6 +574,12 @@ func parseOptions(options []string) (*overlayOptions, error) {
withReference: withReference,
})
}
case "use_composefs":
logrus.Debugf("overlay: use_composefs=%s", val)
o.useComposefs, err = strconv.ParseBool(val)
if err != nil {
return nil, err
}
case "mount_program":
logrus.Debugf("overlay: mount_program=%s", val)
if val != "" {
@ -782,7 +807,7 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI
}

func (d *Driver) useNaiveDiff() bool {
if d.useComposeFs() {
if d.usingComposefs {
return true
}

@ -2002,6 +2027,9 @@ func (d *Driver) getStagingDir() string {
// contains files for the layer differences, either for this layer, or one of our
// lowers if we're just a template directory. Used for direct access for tar-split.
func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
if d.usingComposefs {
return nil, nil
}
p, err := d.getDiffPath(id)
if err != nil {
return nil, err
@ -2018,9 +2046,9 @@ func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error {
return os.RemoveAll(stagingDirectory)
}

func (d *Driver) supportsDataOnlyLayers() (bool, error) {
func supportsDataOnlyLayersCached(home, runhome string) (bool, error) {
feature := "dataonly-layers"
overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(d.runhome, feature)
overlayCacheResult, overlayCacheText, err := cachedFeatureCheck(runhome, feature)
if err == nil {
if overlayCacheResult {
logrus.Debugf("Cached value indicated that data-only layers for overlay are supported")
@ -2029,27 +2057,15 @@ func (d *Driver) supportsDataOnlyLayers() (bool, error) {
logrus.Debugf("Cached value indicated that data-only layers for overlay are not supported")
return false, errors.New(overlayCacheText)
}
supportsDataOnly, err := supportsDataOnlyLayers(d.home)
if err2 := cachedFeatureRecord(d.runhome, feature, supportsDataOnly, ""); err2 != nil {
supportsDataOnly, err := supportsDataOnlyLayers(home)
if err2 := cachedFeatureRecord(runhome, feature, supportsDataOnly, ""); err2 != nil {
return false, fmt.Errorf("recording overlay data-only layers support status: %w", err2)
}
return supportsDataOnly, err
}

func (d *Driver) useComposeFs() bool {
if !composeFsSupported() || unshare.IsRootless() {
return false
}
supportsDataOnlyLayers, err := d.supportsDataOnlyLayers()
if err != nil {
logrus.Debugf("Check for data-only layers failed with: %v", err)
return false
}
return supportsDataOnlyLayers
}

// ApplyDiff applies the changes in the new layer using the specified function
func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) {
func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffWithDifferOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) {
var idMappings *idtools.IDMappings
if options != nil {
idMappings = options.Mappings
@ -2083,7 +2099,7 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
differOptions := graphdriver.DifferOptions{
Format: graphdriver.DifferOutputFormatDir,
}
if d.useComposeFs() {
if d.usingComposefs {
differOptions.Format = graphdriver.DifferOutputFormatFlat
}
out, err := differ.ApplyDiff(applyDir, &archive.TarOptions{
@ -2100,12 +2116,12 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App
}

// ApplyDiffFromStagingDirectory applies the changes using the specified staging directory.
func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffOpts) error {
func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory string, diffOutput *graphdriver.DriverWithDifferOutput, options *graphdriver.ApplyDiffWithDifferOpts) error {
if filepath.Dir(stagingDirectory) != d.getStagingDir() {
return fmt.Errorf("%q is not a staging directory", stagingDirectory)
}

if d.useComposeFs() {
if d.usingComposefs {
// FIXME: move this logic into the differ so we don't have to open
// the file twice.
verityDigests, err := enableVerityRecursive(stagingDirectory)
@ -2125,8 +2141,6 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory stri
return err
}

diffOutput.UncompressedDigest = diffOutput.TOCDigest

return os.Rename(stagingDirectory, diffPath)
}

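The Init hunk above gates composefs on three separate checks (no user namespace, kernel support for data-only lower layers, a composefs helper on the system), and the kernel probe is memoized through supportsDataOnlyLayersCached so it runs once per storage root. Below is a minimal standalone sketch of that probe-once, cache-in-runhome pattern; it is illustrative only, and the driver's real cachedFeatureCheck/cachedFeatureRecord helpers (not shown in this diff) also record negative verdicts together with an explanatory text.

package featurecache

import (
    "os"
    "path/filepath"
)

// cachedProbe runs probe() at most once per runhome and memoizes a positive
// verdict as a marker file, so later process starts skip the kernel probe.
// Illustrative re-implementation; not the vendored driver's actual helper.
func cachedProbe(runhome, feature string, probe func() (bool, error)) (bool, error) {
    marker := filepath.Join(runhome, feature+".supported")
    if _, err := os.Stat(marker); err == nil {
        return true, nil // cached "supported" verdict
    }
    ok, err := probe()
    if err != nil || !ok {
        return false, err
    }
    if err := os.WriteFile(marker, []byte("true\n"), 0o644); err != nil {
        return false, err
    }
    return true, nil
}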
448
vendor/github.com/containers/storage/drivers/quota/projectquota.go
generated
vendored
@ -1,453 +1,5 @@
(The removed lines are reproduced verbatim in the new file projectquota_supported.go below; the only block that does not reappear there is the following constant.)

// BackingFsBlockDeviceLink is the name of a file that we place in
// the home directory of a driver that uses this package.
const BackingFsBlockDeviceLink = "backingFsBlockDev"
449
vendor/github.com/containers/storage/drivers/quota/projectquota_supported.go
generated
vendored
Normal file
@ -0,0 +1,449 @@
//go:build linux && !exclude_disk_quota && cgo
// +build linux,!exclude_disk_quota,cgo

//
// projectquota.go - implements XFS project quota controls
// for setting quota limits on a newly created directory.
// It currently supports the legacy XFS specific ioctls.
//
// TODO: use generic quota control ioctl FS_IOC_FS{GET,SET}XATTR
// for both xfs/ext4 for kernel version >= v4.5
//

package quota

/*
#include <stdlib.h>
#include <dirent.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>

#ifndef FS_XFLAG_PROJINHERIT
struct fsxattr {
__u32 fsx_xflags;
__u32 fsx_extsize;
__u32 fsx_nextents;
__u32 fsx_projid;
unsigned char fsx_pad[12];
};
#define FS_XFLAG_PROJINHERIT 0x00000200
#endif
#ifndef FS_IOC_FSGETXATTR
#define FS_IOC_FSGETXATTR _IOR ('X', 31, struct fsxattr)
#endif
#ifndef FS_IOC_FSSETXATTR
#define FS_IOC_FSSETXATTR _IOW ('X', 32, struct fsxattr)
#endif

#ifndef PRJQUOTA
#define PRJQUOTA 2
#endif
#ifndef FS_PROJ_QUOTA
#define FS_PROJ_QUOTA 2
#endif
#ifndef Q_XSETPQLIM
#define Q_XSETPQLIM QCMD(Q_XSETQLIM, PRJQUOTA)
#endif
#ifndef Q_XGETPQUOTA
#define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA)
#endif
*/
import "C"

import (
"errors"
"fmt"
"math"
"os"
"path"
"path/filepath"
"sync"
"syscall"
"unsafe"

"github.com/containers/storage/pkg/directory"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)

const projectIDsAllocatedPerQuotaHome = 10000

// Quota limit params - currently we only control blocks hard limit and inodes
type Quota struct {
Size uint64
Inodes uint64
}

// Control - Context to be used by storage driver (e.g. overlay)
// who wants to apply project quotas to container dirs
type Control struct {
backingFsBlockDev string
nextProjectID uint32
quotas *sync.Map
basePath string
}

// Attempt to generate a unique projectid. Multiple directories
// per file system can have quota and they need a group of unique
// ids. This function attempts to allocate at least projectIDsAllocatedPerQuotaHome(10000)
// unique projectids, based on the inode of the basepath.
func generateUniqueProjectID(path string) (uint32, error) {
fileinfo, err := os.Stat(path)
if err != nil {
return 0, err
}
stat, ok := fileinfo.Sys().(*syscall.Stat_t)
if !ok {
return 0, fmt.Errorf("not a syscall.Stat_t %s", path)
}
projectID := projectIDsAllocatedPerQuotaHome + (stat.Ino*projectIDsAllocatedPerQuotaHome)%(math.MaxUint32-projectIDsAllocatedPerQuotaHome)
return uint32(projectID), nil
}

// NewControl - initialize project quota support.
// Test to make sure that quota can be set on a test dir and find
// the first project id to be used for the next container create.
//
// Returns nil (and error) if project quota is not supported.
//
// First get the project id of the basePath directory.
// This test will fail if the backing fs is not xfs.
//
// xfs_quota tool can be used to assign a project id to the driver home directory, e.g.:
// echo 100000:/var/lib/containers/storage/overlay >> /etc/projects
// echo 200000:/var/lib/containers/storage/volumes >> /etc/projects
// echo storage:100000 >> /etc/projid
// echo volumes:200000 >> /etc/projid
// xfs_quota -x -c 'project -s storage volumes' /<xfs mount point>
//
// In the example above, the storage directory project id will be used as a
// "start offset" and all containers will be assigned larger project ids
// (e.g. >= 100000). Then the volumes directory project id will be used as a
// "start offset" and all volumes will be assigned larger project ids
// (e.g. >= 200000).
// This is a way to prevent xfs_quota management from conflicting with
// containers/storage.

// Then try to create a test directory with the next project id and set a quota
// on it. If that works, continue to scan existing containers to map allocated
// project ids.
func NewControl(basePath string) (*Control, error) {
//
// Get project id of parent dir as minimal id to be used by driver
//
minProjectID, err := getProjectID(basePath)
if err != nil {
return nil, err
}
if minProjectID == 0 {
// Indicates the storage was never initialized
// Generate a unique range of Projectids for this basepath
minProjectID, err = generateUniqueProjectID(basePath)
if err != nil {
return nil, err
}

}
//
// create backing filesystem device node
//
backingFsBlockDev, err := makeBackingFsDev(basePath)
if err != nil {
return nil, err
}

//
// Test if filesystem supports project quotas by trying to set
// a quota on the first available project id
//
quota := Quota{
Size: 0,
Inodes: 0,
}

q := Control{
backingFsBlockDev: backingFsBlockDev,
nextProjectID: minProjectID + 1,
quotas: &sync.Map{},
basePath: basePath,
}

if err := q.setProjectQuota(minProjectID, quota); err != nil {
return nil, err
}

//
// get first project id to be used for next container
//
err = q.findNextProjectID()
if err != nil {
return nil, err
}

logrus.Debugf("NewControl(%s): nextProjectID = %d", basePath, q.nextProjectID)
return &q, nil
}

// SetQuota - assign a unique project id to directory and set the quota limits
// for that project id
func (q *Control) SetQuota(targetPath string, quota Quota) error {
var projectID uint32
value, ok := q.quotas.Load(targetPath)
if ok {
projectID, ok = value.(uint32)
}
if !ok {
projectID = q.nextProjectID

//
// assign project id to new container directory
//
err := setProjectID(targetPath, projectID)
if err != nil {
return err
}

q.quotas.Store(targetPath, projectID)
q.nextProjectID++
}

//
// set the quota limit for the container's project id
//
logrus.Debugf("SetQuota path=%s, size=%d, inodes=%d, projectID=%d", targetPath, quota.Size, quota.Inodes, projectID)
return q.setProjectQuota(projectID, quota)
}

// ClearQuota removes the map entry in the quotas map for targetPath.
// It does so to prevent the map leaking entries as directories are deleted.
func (q *Control) ClearQuota(targetPath string) {
q.quotas.Delete(targetPath)
}

// setProjectQuota - set the quota for project id on xfs block device
func (q *Control) setProjectQuota(projectID uint32, quota Quota) error {
var d C.fs_disk_quota_t
d.d_version = C.FS_DQUOT_VERSION
d.d_id = C.__u32(projectID)
d.d_flags = C.FS_PROJ_QUOTA

if quota.Size > 0 {
d.d_fieldmask = d.d_fieldmask | C.FS_DQ_BHARD | C.FS_DQ_BSOFT
d.d_blk_hardlimit = C.__u64(quota.Size / 512)
d.d_blk_softlimit = d.d_blk_hardlimit
}
if quota.Inodes > 0 {
d.d_fieldmask = d.d_fieldmask | C.FS_DQ_IHARD | C.FS_DQ_ISOFT
d.d_ino_hardlimit = C.__u64(quota.Inodes)
d.d_ino_softlimit = d.d_ino_hardlimit
}

cs := C.CString(q.backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))

runQuotactl := func() syscall.Errno {
_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XSETPQLIM,
uintptr(unsafe.Pointer(cs)), uintptr(d.d_id),
uintptr(unsafe.Pointer(&d)), 0, 0)
return errno
}

errno := runQuotactl()

// If the backingFsBlockDev does not exist any more then try to recreate it.
if errors.Is(errno, unix.ENOENT) {
if _, err := makeBackingFsDev(q.basePath); err != nil {
return fmt.Errorf(
"failed to recreate missing backingFsBlockDev %s for projid %d: %w",
q.backingFsBlockDev, projectID, err,
)
}

if errno := runQuotactl(); errno != 0 {
return fmt.Errorf("failed to set quota limit for projid %d on %s after backingFsBlockDev recreation: %w",
projectID, q.backingFsBlockDev, errno)
}

} else if errno != 0 {
return fmt.Errorf("failed to set quota limit for projid %d on %s: %w",
projectID, q.backingFsBlockDev, errno)
}

return nil
}

// GetQuota - get the quota limits of a directory that was configured with SetQuota
func (q *Control) GetQuota(targetPath string, quota *Quota) error {
d, err := q.fsDiskQuotaFromPath(targetPath)
if err != nil {
return err
}
quota.Size = uint64(d.d_blk_hardlimit) * 512
quota.Inodes = uint64(d.d_ino_hardlimit)
return nil
}

// GetDiskUsage - get the current disk usage of a directory that was configured with SetQuota
func (q *Control) GetDiskUsage(targetPath string, usage *directory.DiskUsage) error {
d, err := q.fsDiskQuotaFromPath(targetPath)
if err != nil {
return err
}
usage.Size = int64(d.d_bcount) * 512
usage.InodeCount = int64(d.d_icount)

return nil
}

func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, error) {
var d C.fs_disk_quota_t
var projectID uint32
value, ok := q.quotas.Load(targetPath)
if ok {
projectID, ok = value.(uint32)
}
if !ok {
return d, fmt.Errorf("quota not found for path : %s", targetPath)
}

//
// get the quota limit for the container's project id
//
cs := C.CString(q.backingFsBlockDev)
defer C.free(unsafe.Pointer(cs))

_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, C.Q_XGETPQUOTA,
uintptr(unsafe.Pointer(cs)), uintptr(C.__u32(projectID)),
uintptr(unsafe.Pointer(&d)), 0, 0)
if errno != 0 {
return d, fmt.Errorf("failed to get quota limit for projid %d on %s: %w",
projectID, q.backingFsBlockDev, errno)
}

return d, nil
}

// getProjectID - get the project id of path on xfs
func getProjectID(targetPath string) (uint32, error) {
dir, err := openDir(targetPath)
if err != nil {
return 0, err
}
defer closeDir(dir)

var fsx C.struct_fsxattr
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return 0, fmt.Errorf("failed to get projid for %s: %w", targetPath, errno)
}

return uint32(fsx.fsx_projid), nil
}

// setProjectID - set the project id of path on xfs
func setProjectID(targetPath string, projectID uint32) error {
dir, err := openDir(targetPath)
if err != nil {
return err
}
defer closeDir(dir)

var fsx C.struct_fsxattr
_, _, errno := unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSGETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return fmt.Errorf("failed to get projid for %s: %w", targetPath, errno)
}
fsx.fsx_projid = C.__u32(projectID)
fsx.fsx_xflags |= C.FS_XFLAG_PROJINHERIT
_, _, errno = unix.Syscall(unix.SYS_IOCTL, getDirFd(dir), C.FS_IOC_FSSETXATTR,
uintptr(unsafe.Pointer(&fsx)))
if errno != 0 {
return fmt.Errorf("failed to set projid for %s: %w", targetPath, errno)
}

return nil
}

// findNextProjectID - find the next project id to be used for containers
// by scanning driver home directory to find used project ids
func (q *Control) findNextProjectID() error {
files, err := os.ReadDir(q.basePath)
if err != nil {
return fmt.Errorf("read directory failed : %s", q.basePath)
}
for _, file := range files {
if !file.IsDir() {
continue
}
path := filepath.Join(q.basePath, file.Name())
projid, err := getProjectID(path)
if err != nil {
return err
}
if projid > 0 {
q.quotas.Store(path, projid)
}
if q.nextProjectID <= projid {
q.nextProjectID = projid + 1
}
}

return nil
}

func free(p *C.char) {
C.free(unsafe.Pointer(p))
}

func openDir(path string) (*C.DIR, error) {
Cpath := C.CString(path)
defer free(Cpath)

dir, errno := C.opendir(Cpath)
if dir == nil {
return nil, fmt.Errorf("can't open dir %v: %w", Cpath, errno)
}
return dir, nil
}

func closeDir(dir *C.DIR) {
if dir != nil {
C.closedir(dir)
}
}

func getDirFd(dir *C.DIR) uintptr {
return uintptr(C.dirfd(dir))
}

// Get the backing block device of the driver home directory
// and create a block device node under the home directory
// to be used by quotactl commands
func makeBackingFsDev(home string) (string, error) {
var stat unix.Stat_t
if err := unix.Stat(home, &stat); err != nil {
return "", err
}

backingFsBlockDev := path.Join(home, BackingFsBlockDeviceLink)
backingFsBlockDevTmp := backingFsBlockDev + ".tmp"
// Re-create just in case someone copied the home directory over to a new device
if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil {
if !errors.Is(err, unix.EEXIST) {
return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err)
}
// On EEXIST, try again after unlinking any potential leftover.
_ = unix.Unlink(backingFsBlockDevTmp)
if err := unix.Mknod(backingFsBlockDevTmp, unix.S_IFBLK|0o600, int(stat.Dev)); err != nil {
return "", fmt.Errorf("failed to mknod %s: %w", backingFsBlockDevTmp, err)
}
}
if err := unix.Rename(backingFsBlockDevTmp, backingFsBlockDev); err != nil {
return "", fmt.Errorf("failed to rename %s to %s: %w", backingFsBlockDevTmp, backingFsBlockDev, err)
}

return backingFsBlockDev, nil
}

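For orientation, a hedged usage sketch of the public API this file exposes (NewControl, SetQuota, GetQuota, GetDiskUsage). The paths are hypothetical, and the calls only succeed on an XFS filesystem mounted with project quotas (prjquota) enabled:

package main

import (
    "fmt"

    "github.com/containers/storage/drivers/quota"
    "github.com/containers/storage/pkg/directory"
)

func main() {
    // Hypothetical driver home on an XFS mount with prjquota.
    ctl, err := quota.NewControl("/var/lib/containers/storage/overlay")
    if err != nil {
        fmt.Println("project quotas unavailable:", err)
        return
    }

    // Cap a (hypothetical) container directory at 1 GiB and 10000 inodes.
    target := "/var/lib/containers/storage/overlay/abc123"
    if err := ctl.SetQuota(target, quota.Quota{Size: 1 << 30, Inodes: 10000}); err != nil {
        fmt.Println("SetQuota:", err)
        return
    }

    var q quota.Quota
    if err := ctl.GetQuota(target, &q); err == nil {
        fmt.Printf("limits: %d bytes, %d inodes\n", q.Size, q.Inodes)
    }

    var usage directory.DiskUsage
    if err := ctl.GetDiskUsage(target, &usage); err == nil {
        fmt.Printf("usage: %d bytes, %d inodes\n", usage.Size, usage.InodeCount)
    }
}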
119
vendor/github.com/containers/storage/layers.go
generated
vendored
@ -126,6 +126,13 @@ type Layer struct {
// as a DiffID.
UncompressedDigest digest.Digest `json:"diff-digest,omitempty"`

// TOCDigest represents the digest of the Table of Contents (TOC) of the blob.
// This digest is utilized when the UncompressedDigest is not
// validated during the partial image pull process, but the
// TOC itself is validated.
// It serves as an alternative reference under these specific conditions.
TOCDigest digest.Digest `json:"toc-digest,omitempty"`

// UncompressedSize is the length of the blob that was last passed to
// ApplyDiff() or create(), after we decompressed it. If
// UncompressedDigest is not set, this should be treated as if it were
@ -228,6 +235,10 @@ type roLayerStore interface {
// specified uncompressed digest value recorded for them.
LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)

// LayersByTOCDigest returns a slice of the layers with the
// specified TOC digest value recorded for them.
LayersByTOCDigest(d digest.Digest) ([]Layer, error)

// Layers returns a slice of the known layers.
Layers() ([]Layer, error)
}
@ -296,13 +307,13 @@ type rwLayerStore interface {

// ApplyDiffWithDiffer applies the changes through the differ callback function.
// If to is the empty string, then a staging directory is created by the driver.
ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)

// CleanupStagingDirectory cleans up the staging directory. It can be used to clean up the staging directory on errors
CleanupStagingDirectory(stagingDirectory string) error

// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error

// DifferTarget gets the location where files are stored for the layer.
DifferTarget(id string) (string, error)
@ -337,6 +348,7 @@ type layerStore struct {
bymount map[string]*Layer
bycompressedsum map[digest.Digest][]string
byuncompressedsum map[digest.Digest][]string
bytocsum map[digest.Digest][]string
layerspathsModified [numLayerLocationIndex]time.Time

// FIXME: This field is only set when constructing layerStore, but locking rules of the driver
@ -366,6 +378,7 @@ func copyLayer(l *Layer) *Layer {
CompressedSize: l.CompressedSize,
UncompressedDigest: l.UncompressedDigest,
UncompressedSize: l.UncompressedSize,
TOCDigest: l.TOCDigest,
CompressionType: l.CompressionType,
ReadOnly: l.ReadOnly,
volatileStore: l.volatileStore,
@ -745,6 +758,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
names := make(map[string]*Layer)
compressedsums := make(map[digest.Digest][]string)
uncompressedsums := make(map[digest.Digest][]string)
tocsums := make(map[digest.Digest][]string)
var errorToResolveBySaving error // == nil; if there are multiple errors, this is one of them.
if r.lockfile.IsReadWrite() {
selinux.ClearLabels()
@ -765,6 +779,9 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
if layer.UncompressedDigest != "" {
uncompressedsums[layer.UncompressedDigest] = append(uncompressedsums[layer.UncompressedDigest], layer.ID)
}
if layer.TOCDigest != "" {
tocsums[layer.TOCDigest] = append(tocsums[layer.TOCDigest], layer.ID)
}
if layer.MountLabel != "" {
selinux.ReserveLabel(layer.MountLabel)
}
@ -792,6 +809,7 @@ func (r *layerStore) load(lockedForWriting bool) (bool, error) {
r.byname = names
r.bycompressedsum = compressedsums
r.byuncompressedsum = uncompressedsums
r.bytocsum = tocsums

// Load and merge information about which layers are mounted, and where.
if r.lockfile.IsReadWrite() {
@ -1112,7 +1130,7 @@ func (r *layerStore) Size(name string) (int64, error) {
// We use the presence of a non-empty digest as an indicator that the size value was intentionally set, and that
// a zero value is not just present because it was never set to anything else (which can happen if the layer was
// created by a version of this library that didn't keep track of digest and size information).
if layer.UncompressedDigest != "" {
if layer.TOCDigest != "" || layer.UncompressedDigest != "" {
return layer.UncompressedSize, nil
}
return -1, nil
@ -1201,6 +1219,9 @@ func (r *layerStore) PutAdditionalLayer(id string, parentLayer *Layer, names []s
if layer.UncompressedDigest != "" {
r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
}
if layer.TOCDigest != "" {
r.bytocsum[layer.TOCDigest] = append(r.bytocsum[layer.TOCDigest], layer.ID)
}
if err := r.saveFor(layer); err != nil {
if e := r.Delete(layer.ID); e != nil {
logrus.Errorf("While recovering from a failure to save layers, error deleting layer %#v: %v", id, e)
@ -1251,6 +1272,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
templateCompressedDigest digest.Digest
templateCompressedSize int64
templateUncompressedDigest digest.Digest
templateTOCDigest digest.Digest
templateUncompressedSize int64
templateCompressionType archive.Compression
templateUIDs, templateGIDs []uint32
@ -1263,6 +1285,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
}
templateMetadata = templateLayer.Metadata
templateIDMappings = idtools.NewIDMappingsFromMaps(templateLayer.UIDMap, templateLayer.GIDMap)
templateTOCDigest = templateLayer.TOCDigest
templateCompressedDigest, templateCompressedSize = templateLayer.CompressedDigest, templateLayer.CompressedSize
templateUncompressedDigest, templateUncompressedSize = templateLayer.UncompressedDigest, templateLayer.UncompressedSize
templateCompressionType = templateLayer.CompressionType
@ -1291,6 +1314,7 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
CompressedDigest: templateCompressedDigest,
CompressedSize: templateCompressedSize,
UncompressedDigest: templateUncompressedDigest,
TOCDigest: templateTOCDigest,
UncompressedSize: templateUncompressedSize,
CompressionType: templateCompressionType,
UIDs: templateUIDs,
@ -1413,6 +1437,9 @@ func (r *layerStore) create(id string, parentLayer *Layer, names []string, mount
if layer.UncompressedDigest != "" {
r.byuncompressedsum[layer.UncompressedDigest] = append(r.byuncompressedsum[layer.UncompressedDigest], layer.ID)
}
if layer.TOCDigest != "" {
r.bytocsum[layer.TOCDigest] = append(r.bytocsum[layer.TOCDigest], layer.ID)
}
}

delete(layer.Flags, incompleteFlag)
@ -2007,9 +2034,16 @@ func (s *simpleGetCloser) Close() error {
// LOCKING BUG: See the comments in layerStore.Diff
func (r *layerStore) newFileGetter(id string) (drivers.FileGetCloser, error) {
if getter, ok := r.driver.(drivers.DiffGetterDriver); ok {
return getter.DiffGetter(id)
fgc, err := getter.DiffGetter(id)
if err != nil {
return nil, err
}
if fgc != nil {
return fgc, nil
}
}
path, err := r.Mount(id, drivers.MountOpts{})

path, err := r.Mount(id, drivers.MountOpts{Options: []string{"ro"}})
if err != nil {
return nil, err
}
@ -2197,6 +2231,25 @@ func (r *layerStore) DiffSize(from, to string) (size int64, err error) {
return r.driver.DiffSize(to, r.layerMappings(toLayer), from, r.layerMappings(fromLayer), toLayer.MountLabel)
}

func updateDigestMap(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) {
var newList []string
if oldvalue != "" {
for _, value := range (*m)[oldvalue] {
if value != id {
newList = append(newList, value)
}
}
if len(newList) > 0 {
(*m)[oldvalue] = newList
} else {
delete(*m, oldvalue)
}
}
if newvalue != "" {
(*m)[newvalue] = append((*m)[newvalue], id)
}
}

// Requires startWriting.
func (r *layerStore) ApplyDiff(to string, diff io.Reader) (size int64, err error) {
return r.applyDiffWithOptions(to, nil, diff)
@ -2313,24 +2366,6 @@ func (r *layerStore) applyDiffWithOptions(to string, layerOptions *LayerOptions,
uncompressedDigest = uncompressedDigester.Digest()
}

updateDigestMap := func(m *map[digest.Digest][]string, oldvalue, newvalue digest.Digest, id string) {
var newList []string
if oldvalue != "" {
for _, value := range (*m)[oldvalue] {
if value != id {
newList = append(newList, value)
}
}
if len(newList) > 0 {
(*m)[oldvalue] = newList
} else {
delete(*m, oldvalue)
}
}
if newvalue != "" {
(*m)[newvalue] = append((*m)[newvalue], id)
}
}
updateDigestMap(&r.bycompressedsum, layer.CompressedDigest, compressedDigest, layer.ID)
layer.CompressedDigest = compressedDigest
layer.CompressedSize = compressedCounter.Count
@ -2372,7 +2407,7 @@ func (r *layerStore) DifferTarget(id string) (string, error) {
}

// Requires startWriting.
func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error {
func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error {
ddriver, ok := r.driver.(drivers.DriverWithDiffer)
if !ok {
return ErrNotSupported
@ -2382,20 +2417,35 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
return ErrLayerUnknown
}
if options == nil {
options = &drivers.ApplyDiffOpts{
Mappings: r.layerMappings(layer),
MountLabel: layer.MountLabel,
options = &drivers.ApplyDiffWithDifferOpts{
ApplyDiffOpts: drivers.ApplyDiffOpts{
Mappings: r.layerMappings(layer),
MountLabel: layer.MountLabel,
},
Flags: nil,
}
}

err := ddriver.ApplyDiffFromStagingDirectory(layer.ID, layer.Parent, stagingDirectory, diffOutput, options)
if err != nil {
return err
}
layer.UIDs = diffOutput.UIDs
layer.GIDs = diffOutput.GIDs
updateDigestMap(&r.byuncompressedsum, layer.UncompressedDigest, diffOutput.UncompressedDigest, layer.ID)
layer.UncompressedDigest = diffOutput.UncompressedDigest
updateDigestMap(&r.bytocsum, diffOutput.TOCDigest, diffOutput.TOCDigest, layer.ID)
layer.TOCDigest = diffOutput.TOCDigest
layer.UncompressedSize = diffOutput.Size
layer.Metadata = diffOutput.Metadata
if options != nil && options.Flags != nil {
if layer.Flags == nil {
layer.Flags = make(map[string]interface{})
}
for k, v := range options.Flags {
layer.Flags[k] = v
}
}
if len(diffOutput.TarSplit) != 0 {
tsdata := bytes.Buffer{}
compressor, err := pgzip.NewWriterLevel(&tsdata, pgzip.BestSpeed)
@ -2432,7 +2482,7 @@ func (r *layerStore) ApplyDiffFromStagingDirectory(id, stagingDirectory string,
}

// Requires startWriting.
func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
ddriver, ok := r.driver.(drivers.DriverWithDiffer)
if !ok {
return nil, ErrNotSupported
@ -2448,9 +2498,11 @@ func (r *layerStore) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOp
return nil, ErrLayerUnknown
}
if options == nil {
options = &drivers.ApplyDiffOpts{
Mappings: r.layerMappings(layer),
MountLabel: layer.MountLabel,
options = &drivers.ApplyDiffWithDifferOpts{
ApplyDiffOpts: drivers.ApplyDiffOpts{
Mappings: r.layerMappings(layer),
MountLabel: layer.MountLabel,
},
}
}
output, err := ddriver.ApplyDiffWithDiffer(layer.ID, layer.Parent, options, differ)
@ -2494,6 +2546,11 @@ func (r *layerStore) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error
return r.layersByDigestMap(r.byuncompressedsum, d)
}

// Requires startReading or startWriting.
func (r *layerStore) LayersByTOCDigest(d digest.Digest) ([]Layer, error) {
return r.layersByDigestMap(r.bytocsum, d)
}

func closeAll(closes ...func() error) (rErr error) {
for _, f := range closes {
if err := f(); err != nil {

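The updateDigestMap helper above was hoisted out of applyDiffWithOptions (where it used to be a closure, removed later in the same file) so that ApplyDiffFromStagingDirectory can reuse it to re-key the by-digest indexes. A self-contained sketch of its semantics, with plain strings standing in for digest.Digest:

package main

import "fmt"

// Same move/append semantics as the hoisted updateDigestMap above.
func updateDigestMap(m *map[string][]string, oldvalue, newvalue, id string) {
    var newList []string
    if oldvalue != "" {
        for _, value := range (*m)[oldvalue] {
            if value != id {
                newList = append(newList, value)
            }
        }
        if len(newList) > 0 {
            (*m)[oldvalue] = newList
        } else {
            delete(*m, oldvalue)
        }
    }
    if newvalue != "" {
        (*m)[newvalue] = append((*m)[newvalue], id)
    }
}

func main() {
    byDigest := map[string][]string{"sha256:old": {"layer1", "layer2"}}
    // layer1's recorded digest changes from sha256:old to sha256:new.
    updateDigestMap(&byDigest, "sha256:old", "sha256:new", "layer1")
    fmt.Println(byDigest) // map[sha256:new:[layer1] sha256:old:[layer2]]
}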
6
vendor/github.com/containers/storage/pkg/archive/archive.go
generated
vendored
@ -477,7 +477,7 @@ func (ta *tarAppender) addTarFile(path, name string) error {
}
}
if fi.Mode()&os.ModeSocket != 0 {
logrus.Warnf("archive: skipping %q since it is a socket", path)
logrus.Infof("archive: skipping %q since it is a socket", path)
return nil
}

@ -534,6 +534,10 @@ func (ta *tarAppender) addTarFile(path, name string) error {
if ta.ChownOpts != nil {
hdr.Uid = ta.ChownOpts.UID
hdr.Gid = ta.ChownOpts.GID
// Don’t expose the user names from the local system; they probably don’t match the ta.ChownOpts value anyway,
// and they unnecessarily give recipients of the tar file potentially private data.
hdr.Uname = ""
hdr.Gname = ""
}

maybeTruncateHeaderModTime(hdr)

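The second hunk scrubs the local user and group names whenever ownership is forced through ChownOpts, so archives don't leak names from the build host. A small sketch of the effect on an archive/tar header (the forced IDs here are arbitrary):

package main

import (
    "archive/tar"
    "fmt"
)

func main() {
    hdr := &tar.Header{Name: "etc/passwd", Uid: 1000, Gid: 1000, Uname: "alice", Gname: "staff"}

    // Mirroring the hunk above: once ownership is overridden, the local
    // user/group names no longer match and are dropped from the archive.
    hdr.Uid, hdr.Gid = 0, 0
    hdr.Uname, hdr.Gname = "", ""

    fmt.Printf("%+v\n", *hdr)
}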
16
vendor/github.com/containers/storage/pkg/chunked/cache_linux.go
generated
vendored
@ -578,7 +578,10 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
return byteSliceAsString(buf.Bytes()[from:to])
}

iter = jsoniter.ParseBytes(jsoniter.ConfigFastest, manifest)
pool := iter.Pool()
pool.ReturnIterator(iter)
iter = pool.BorrowIterator(manifest)

for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
if strings.ToLower(field) == "version" {
toc.Version = iter.ReadInt()
@ -657,8 +660,17 @@ func unmarshalToc(manifest []byte) (*internal.TOC, error) {
}
toc.Entries = append(toc.Entries, m)
}
break
}

// validate there is no extra data in the provided input. This is a security measure to ensure
// that the digest we calculate for the TOC covers the entire document.
if iter.Error != nil && iter.Error != io.EOF {
return nil, iter.Error
}
if iter.WhatIsNext() != jsoniter.InvalidValue || !errors.Is(iter.Error, io.EOF) {
return nil, fmt.Errorf("unexpected data after manifest")
}

toc.StringsBuf = buf
return &toc, nil
}

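The new tail check rejects any bytes after the TOC object, so the digest computed over the manifest provably covers everything that was parsed. A standalone sketch of the same jsoniter trailing-data check, applied to a toy document of string-to-int fields:

package main

import (
    "errors"
    "fmt"
    "io"

    jsoniter "github.com/json-iterator/go"
)

// parseStrict decodes one JSON object and fails if any bytes follow it,
// mirroring the trailing-data validation added to unmarshalToc above.
func parseStrict(data []byte) (map[string]int, error) {
    iter := jsoniter.ParseBytes(jsoniter.ConfigFastest, data)
    out := map[string]int{}
    for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
        out[field] = iter.ReadInt()
    }
    if iter.Error != nil && iter.Error != io.EOF {
        return nil, iter.Error
    }
    // Anything other than a clean EOF here means trailing garbage.
    if iter.WhatIsNext() != jsoniter.InvalidValue || !errors.Is(iter.Error, io.EOF) {
        return nil, fmt.Errorf("unexpected data after document")
    }
    return out, nil
}

func main() {
    fmt.Println(parseStrict([]byte(`{"version":1}`)))       // map[version:1] <nil>
    fmt.Println(parseStrict([]byte(`{"version":1} extra`))) // error
}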
16
vendor/github.com/containers/storage/pkg/chunked/compressor/compressor.go
generated
vendored
@ -420,6 +420,14 @@ func writeZstdChunkedStream(destFile io.Writer, outMetadata map[string]string, r
zstdWriter.Close()
return err
}

// make sure the entire tarball is flushed to the output as it might contain
// some trailing zeros that affect the checksum.
if _, err := io.Copy(zstdWriter, its); err != nil {
zstdWriter.Close()
return err
}

if err := zstdWriter.Flush(); err != nil {
zstdWriter.Close()
return err
@ -452,12 +460,12 @@ type zstdChunkedWriter struct {
}

func (w zstdChunkedWriter) Close() error {
err := <-w.tarSplitErr
if err != nil {
w.tarSplitOut.Close()
errClose := w.tarSplitOut.Close()

if err := <-w.tarSplitErr; err != nil && err != io.EOF {
return err
}
return w.tarSplitOut.Close()
return errClose
}

func (w zstdChunkedWriter) Write(p []byte) (int, error) {

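The corrected Close fixes two bugs at once: the pipe's own close error is no longer discarded, and the pipe is no longer closed twice. A generic sketch of that close-then-drain ordering:

package main

import (
    "fmt"
    "io"
)

// closeThenDrain mirrors the fixed zstdChunkedWriter.Close above: close the
// output first and remember the result, then wait for the background worker,
// and report the close error only if the worker itself succeeded.
func closeThenDrain(out io.Closer, workerErr <-chan error) error {
    errClose := out.Close()
    if err := <-workerErr; err != nil && err != io.EOF {
        return err
    }
    return errClose
}

func main() {
    errs := make(chan error, 1)
    errs <- nil // worker finished cleanly
    fmt.Println(closeThenDrain(io.NopCloser(nil), errs))
}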
2
vendor/github.com/containers/storage/pkg/chunked/dump/dump.go
generated
vendored
@ -94,7 +94,7 @@ func getStMode(mode uint32, typ string) (uint32, error) {
}

func dumpNode(out io.Writer, links map[string]int, verityDigests map[string]string, entry *internal.FileMetadata) error {
path := entry.Name
path := strings.TrimRight(entry.Name, "/")
if path == "" {
path = "/"
} else if path[0] != '/' {

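This change makes dumpNode treat directory names with and without a trailing slash identically. A sketch of the normalization; note that the leading-slash prepend in the else branch is an assumption, since the hunk is truncated at that line:

package main

import (
    "fmt"
    "strings"
)

// normalize mirrors the dumpNode change above: strip trailing slashes so
// "usr/" and "usr" map to the same entry, keeping the root as "/".
func normalize(name string) string {
    path := strings.TrimRight(name, "/")
    if path == "" {
        path = "/"
    } else if path[0] != '/' {
        path = "/" + path // assumed body of the truncated else branch
    }
    return path
}

func main() {
    fmt.Println(normalize("usr/")) // /usr
    fmt.Println(normalize("/"))    // /
    fmt.Println(normalize("etc"))  // /etc
}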
131
vendor/github.com/containers/storage/pkg/chunked/storage_linux.go
generated
vendored
@ -86,6 +86,11 @@ type chunkedDiffer struct {
// the layer are trusted and should not be validated.
skipValidation bool

// blobDigest is the digest of the whole compressed layer. It is used, if
// convertToZstdChunked is set, to validate the layer when it is converted, since there
// is no TOC referenced by the manifest.
blobDigest digest.Digest

blobSize int64

storeOpts *types.StoreOptions
@ -188,33 +193,7 @@ func (f *seekableFile) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser,
return streams, errs, nil
}

func convertTarToZstdChunked(destDirectory string, blobSize int64, iss ImageSourceSeekable) (*seekableFile, digest.Digest, map[string]string, error) {
var payload io.ReadCloser
var streams chan io.ReadCloser
var errs chan error
var err error

chunksToRequest := []ImageSourceChunk{
{
Offset: 0,
Length: uint64(blobSize),
},
}

streams, errs, err = iss.GetBlobAt(chunksToRequest)
if err != nil {
return nil, "", nil, err
}
select {
case p := <-streams:
payload = p
case err := <-errs:
return nil, "", nil, err
}
if payload == nil {
return nil, "", nil, errors.New("invalid stream returned")
}

func convertTarToZstdChunked(destDirectory string, payload *os.File) (*seekableFile, digest.Digest, map[string]string, error) {
diff, err := archive.DecompressStream(payload)
if err != nil {
return nil, "", nil, err
@ -235,10 +214,8 @@ func convertTarToZstdChunked(destDirectory string, blobSize int64, iss ImageSour
return nil, "", nil, err
}

digester := digest.Canonical.Digester()
hash := digester.Hash()

if _, err := io.Copy(io.MultiWriter(chunked, hash), diff); err != nil {
convertedOutputDigester := digest.Canonical.Digester()
if _, err := io.Copy(io.MultiWriter(chunked, convertedOutputDigester.Hash()), diff); err != nil {
f.Close()
return nil, "", nil, err
}
@ -249,27 +226,35 @@ func convertTarToZstdChunked(destDirectory string, blobSize int64, iss ImageSour
is := seekableFile{
file: f,
}
return &is, digester.Digest(), newAnnotations, nil

return &is, convertedOutputDigester.Digest(), newAnnotations, nil
}

// GetDiffer returns a differ that can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
storeOpts, err := types.DefaultStoreOptionsAutoDetectUID()
func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
storeOpts, err := types.DefaultStoreOptions()
if err != nil {
return nil, err
}

if _, ok := annotations[internal.ManifestChecksumKey]; ok {
_, hasZstdChunkedTOC := annotations[internal.ManifestChecksumKey]
_, hasEstargzTOC := annotations[estargz.TOCJSONDigestAnnotation]

if hasZstdChunkedTOC && hasEstargzTOC {
return nil, errors.New("both zstd:chunked and eStargz TOC found")
}

if hasZstdChunkedTOC {
return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts)
}
if _, ok := annotations[estargz.TOCJSONDigestAnnotation]; ok {
if hasEstargzTOC {
return makeEstargzChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts)
}

return makeConvertFromRawDiffer(ctx, store, blobSize, annotations, iss, &storeOpts)
return makeConvertFromRawDiffer(ctx, store, blobDigest, blobSize, annotations, iss, &storeOpts)
}

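GetDiffer now detects both TOC annotation kinds up front, rejects blobs that carry both, and otherwise falls through to raw-blob conversion, the case that motivates the new blobDigest parameter. A sketch of that dispatch with stand-in annotation keys; the real constants live in the vendored internal and estargz packages and may differ:

package main

import (
    "errors"
    "fmt"
)

// Hypothetical stand-ins for internal.ManifestChecksumKey and
// estargz.TOCJSONDigestAnnotation; treat these values as assumptions.
const (
    zstdChunkedTOCKey = "io.github.containers.zstd-chunked.manifest-checksum"
    estargzTOCKey     = "containerd.io/snapshot/stargz/toc.digest"
)

// pickDiffer mirrors the dispatch added to GetDiffer above.
func pickDiffer(annotations map[string]string) (string, error) {
    _, hasZstdChunkedTOC := annotations[zstdChunkedTOCKey]
    _, hasEstargzTOC := annotations[estargzTOCKey]

    if hasZstdChunkedTOC && hasEstargzTOC {
        return "", errors.New("both zstd:chunked and eStargz TOC found")
    }
    if hasZstdChunkedTOC {
        return "zstd:chunked differ", nil
    }
    if hasEstargzTOC {
        return "estargz differ", nil
    }
    // No TOC at all: convert the raw blob, validating it against blobDigest.
    return "convert-from-raw differ", nil
}

func main() {
    fmt.Println(pickDiffer(map[string]string{zstdChunkedTOCKey: "sha256:abc"}))
    fmt.Println(pickDiffer(map[string]string{}))
}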
func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) {
if !parseBooleanPullOption(storeOpts, "convert_images", false) {
return nil, errors.New("convert_images not configured")
}
@ -280,6 +265,7 @@ func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobSize
}

return &chunkedDiffer{
blobDigest: blobDigest,
blobSize: blobSize,
convertToZstdChunked: true,
copyBuffer: makeCopyBuffer(),
@ -1491,6 +1477,43 @@ func makeEntriesFlat(mergedEntries []internal.FileMetadata) ([]internal.FileMeta
return new, nil
}

func (c *chunkedDiffer) copyAllBlobToFile(destination *os.File) (digest.Digest, error) {
var payload io.ReadCloser
var streams chan io.ReadCloser
var errs chan error
var err error

chunksToRequest := []ImageSourceChunk{
{
Offset: 0,
Length: uint64(c.blobSize),
},
}

streams, errs, err = c.stream.GetBlobAt(chunksToRequest)
if err != nil {
return "", err
}
select {
case p := <-streams:
payload = p
case err := <-errs:
return "", err
}
if payload == nil {
return "", errors.New("invalid stream returned")
}

originalRawDigester := digest.Canonical.Digester()

r := io.TeeReader(payload, originalRawDigester.Hash())

// copy the entire tarball and compute its digest
_, err = io.Copy(destination, r)

return originalRawDigester.Digest(), err
}

func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, differOpts *graphdriver.DifferOptions) (graphdriver.DriverWithDifferOutput, error) {
|
||||
defer c.layersCache.release()
|
||||
defer func() {
|
||||
@ -1503,7 +1526,32 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
||||
stream := c.stream
|
||||
|
||||
if c.convertToZstdChunked {
|
||||
fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, c.blobSize, c.stream)
|
||||
fd, err := unix.Open(dest, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600)
|
||||
if err != nil {
|
||||
return graphdriver.DriverWithDifferOutput{}, err
|
||||
}
|
||||
blobFile := os.NewFile(uintptr(fd), "blob-file")
|
||||
defer func() {
|
||||
if blobFile != nil {
|
||||
blobFile.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
// calculate the checksum before accessing the file.
|
||||
compressedDigest, err := c.copyAllBlobToFile(blobFile)
|
||||
if err != nil {
|
||||
return graphdriver.DriverWithDifferOutput{}, err
|
||||
}
|
||||
|
||||
if compressedDigest != c.blobDigest {
|
||||
return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("invalid digest to convert: expected %q, got %q", c.blobDigest, compressedDigest)
|
||||
}
|
||||
|
||||
if _, err := blobFile.Seek(0, io.SeekStart); err != nil {
|
||||
return graphdriver.DriverWithDifferOutput{}, err
|
||||
}
|
||||
|
||||
fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, blobFile)
|
||||
if err != nil {
|
||||
return graphdriver.DriverWithDifferOutput{}, err
|
||||
}
|
||||
@ -1511,6 +1559,10 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
||||
// need to keep it open until the entire file is processed.
|
||||
defer fileSource.Close()
|
||||
|
||||
// Close the file so that the file descriptor is released and the file is deleted.
|
||||
blobFile.Close()
|
||||
blobFile = nil
|
||||
|
||||
manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, c.blobSize, annotations)
|
||||
if err != nil {
|
||||
return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("read zstd:chunked manifest: %w", err)
|
||||
@ -1523,6 +1575,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, diff
|
||||
c.fileType = fileTypeZstdChunked
|
||||
c.manifest = manifest
|
||||
c.tarSplit = tarSplit
|
||||
|
||||
// since we retrieved the whole file and it was validated, use the diffID instead of the TOC digest.
|
||||
c.contentDigest = diffID
|
||||
c.tocOffset = tocOffset
|
||||
|
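The visible API change in this file is that chunked.GetDiffer now takes the blob's expected digest, so the convert-from-raw path can verify the downloaded blob before converting it. A minimal caller sketch (hedged: blob and iss are hypothetical stand-ins for the layer metadata and ImageSourceSeekable a caller such as c/image already holds; this is not a verbatim excerpt from the commit):

	// differ drives a partial pull; the new blobDigest argument lets the
	// convert_images path reject a blob whose content does not match.
	differ, err := chunked.GetDiffer(ctx, store, blob.Digest, blob.Size, blob.Annotations, iss)
	if err != nil {
		return err // e.g. no TOC present and convert_images is not configured
	}
	out, err := store.ApplyDiffWithDiffer("", nil, differ)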
3 vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go generated vendored
@@ -9,9 +9,10 @@ import (

	storage "github.com/containers/storage"
	graphdriver "github.com/containers/storage/drivers"
	digest "github.com/opencontainers/go-digest"
)

// GetDiffer returns a differ than can be used with ApplyDiffWithDiffer.
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
func GetDiffer(ctx context.Context, store storage.Store, blobDigest digest.Digest, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) {
	return nil, errors.New("format not supported on this system")
}
34 vendor/github.com/containers/storage/pkg/chunked/toc/toc.go generated vendored Normal file
@@ -0,0 +1,34 @@
package toc

import (
	"github.com/containers/storage/pkg/chunked/internal"
	digest "github.com/opencontainers/go-digest"
)

// tocJSONDigestAnnotation is the annotation key for the digest of the estargz
// TOC JSON.
// It is defined in github.com/containerd/stargz-snapshotter/estargz as TOCJSONDigestAnnotation
// Duplicate it here to avoid a dependency on the package.
const tocJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"

// GetTOCDigest returns the digest of the TOC as recorded in the annotations.
// This function retrieves a digest that represents the content of a
// table of contents (TOC) from the image's annotations.
// This is an experimental feature and may be changed/removed in the future.
func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) {
	if contentDigest, ok := annotations[tocJSONDigestAnnotation]; ok {
		d, err := digest.Parse(contentDigest)
		if err != nil {
			return nil, err
		}
		return &d, nil
	}
	if contentDigest, ok := annotations[internal.ManifestChecksumKey]; ok {
		d, err := digest.Parse(contentDigest)
		if err != nil {
			return nil, err
		}
		return &d, nil
	}
	return nil, nil
}
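The new toc package gives callers one helper for both TOC flavors without importing the estargz module. A short usage sketch (layerAnnotations is a hypothetical variable holding the annotations of an OCI layer descriptor):

	import "github.com/containers/storage/pkg/chunked/toc"

	tocDigest, err := toc.GetTOCDigest(layerAnnotations)
	if err != nil {
		return err // an annotation was present but did not parse as a digest
	}
	if tocDigest == nil {
		// ordinary tar layer: neither an eStargz nor a zstd:chunked TOC is recorded
	}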
9 vendor/github.com/containers/storage/pkg/config/config.go generated vendored
@@ -97,6 +97,8 @@ type OverlayOptionsConfig struct {
	Inodes string `toml:"inodes,omitempty"`
	// Do not create a bind mount on the storage home
	SkipMountHome string `toml:"skip_mount_home,omitempty"`
	// Specify whether composefs must be used to mount the data layers
	UseComposefs string `toml:"use_composefs,omitempty"`
	// ForceMask indicates the permissions mask (e.g. "0755") to use for new
	// files and directories
	ForceMask string `toml:"force_mask,omitempty"`
@@ -147,6 +149,9 @@ type OptionsConfig struct {
	// ignored when building an image.
	IgnoreChownErrors string `toml:"ignore_chown_errors,omitempty"`

	// Specify whether composefs must be used to mount the data layers
	UseComposefs string `toml:"use_composefs,omitempty"`

	// ForceMask indicates the permissions mask (e.g. "0755") to use for new
	// files and directories.
	ForceMask os.FileMode `toml:"force_mask,omitempty"`
@@ -283,6 +288,7 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
	}

	case "overlay", "overlay2":
		// Specify whether composefs must be used to mount the data layers
		if options.Overlay.IgnoreChownErrors != "" {
			doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Overlay.IgnoreChownErrors))
		} else if options.IgnoreChownErrors != "" {
@@ -316,6 +322,9 @@ func GetGraphDriverOptions(driverName string, options OptionsConfig) []string {
		} else if options.ForceMask != 0 {
			doptions = append(doptions, fmt.Sprintf("%s.force_mask=%s", driverName, options.ForceMask))
		}
		if options.Overlay.UseComposefs != "" {
			doptions = append(doptions, fmt.Sprintf("%s.use_composefs=%s", driverName, options.Overlay.UseComposefs))
		}
	case "vfs":
		if options.Vfs.IgnoreChownErrors != "" {
			doptions = append(doptions, fmt.Sprintf("%s.ignore_chown_errors=%s", driverName, options.Vfs.IgnoreChownErrors))
15 vendor/github.com/containers/storage/pkg/homedir/homedir.go generated vendored
@@ -6,21 +6,6 @@ import (
	"path/filepath"
)

// GetConfigHome returns XDG_CONFIG_HOME.
// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetConfigHome() (string, error) {
	if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
		return xdgConfigHome, nil
	}
	home := Get()
	if home == "" {
		return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
	}
	return filepath.Join(home, ".config"), nil
}

// GetDataHome returns XDG_DATA_HOME.
// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set.
//
17 vendor/github.com/containers/storage/pkg/homedir/homedir_others.go generated vendored
@@ -8,6 +8,8 @@ package homedir

import (
	"errors"
	"os"
	"path/filepath"
)

// GetRuntimeDir is unsupported on non-linux system.
@@ -19,3 +21,18 @@ func GetRuntimeDir() (string, error) {
func StickRuntimeDirContents(files []string) ([]string, error) {
	return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system")
}

// GetConfigHome returns XDG_CONFIG_HOME.
// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetConfigHome() (string, error) {
	if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" {
		return xdgConfigHome, nil
	}
	home := Get()
	if home == "" {
		return "", errors.New("could not get either XDG_CONFIG_HOME or HOME")
	}
	return filepath.Join(home, ".config"), nil
}
113 vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go generated vendored
@@ -7,12 +7,16 @@ package homedir
// NOTE: this package has originally been copied from github.com/docker/docker.

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"syscall"

	"github.com/containers/storage/pkg/unshare"
	"github.com/sirupsen/logrus"
)

// Key returns the env var name for the user's home dir based on
@@ -40,18 +44,6 @@ func GetShortcutString() string {
	return "~"
}

// GetRuntimeDir returns XDG_RUNTIME_DIR.
// XDG_RUNTIME_DIR is typically configured via pam_systemd.
// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetRuntimeDir() (string, error) {
	if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" {
		return filepath.EvalSymlinks(xdgRuntimeDir)
	}
	return "", errors.New("could not get XDG_RUNTIME_DIR")
}

// StickRuntimeDirContents sets the sticky bit on files that are under
// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system.
//
@@ -94,3 +86,98 @@ func stick(f string) error {
	m |= os.ModeSticky
	return os.Chmod(f, m)
}

var (
	rootlessConfigHomeDirError error
	rootlessConfigHomeDirOnce  sync.Once
	rootlessConfigHomeDir      string
	rootlessRuntimeDirOnce     sync.Once
	rootlessRuntimeDir         string
)

// isWriteableOnlyByOwner checks that the specified permission mask allows write
// access only to the owner.
func isWriteableOnlyByOwner(perm os.FileMode) bool {
	return (perm & 0o722) == 0o700
}

// GetConfigHome returns XDG_CONFIG_HOME.
// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetConfigHome() (string, error) {
	rootlessConfigHomeDirOnce.Do(func() {
		cfgHomeDir := os.Getenv("XDG_CONFIG_HOME")
		if cfgHomeDir == "" {
			home := Get()
			resolvedHome, err := filepath.EvalSymlinks(home)
			if err != nil {
				rootlessConfigHomeDirError = fmt.Errorf("cannot resolve %s: %w", home, err)
				return
			}
			tmpDir := filepath.Join(resolvedHome, ".config")
			_ = os.MkdirAll(tmpDir, 0o700)
			st, err := os.Stat(tmpDir)
			if err != nil {
				rootlessConfigHomeDirError = err
				return
			} else if int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() {
				cfgHomeDir = tmpDir
			} else {
				rootlessConfigHomeDirError = fmt.Errorf("path %q exists and it is not owned by the current user", tmpDir)
				return
			}
		}
		rootlessConfigHomeDir = cfgHomeDir
	})

	return rootlessConfigHomeDir, rootlessConfigHomeDirError
}

// GetRuntimeDir returns a directory suitable to store runtime files.
// The function will try to use the XDG_RUNTIME_DIR env variable if it is set.
// XDG_RUNTIME_DIR is typically configured via pam_systemd.
// If XDG_RUNTIME_DIR is not set, GetRuntimeDir will try to find a suitable
// directory for the current user.
//
// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
func GetRuntimeDir() (string, error) {
	var rootlessRuntimeDirError error

	rootlessRuntimeDirOnce.Do(func() {
		runtimeDir := os.Getenv("XDG_RUNTIME_DIR")

		if runtimeDir != "" {
			rootlessRuntimeDir, rootlessRuntimeDirError = filepath.EvalSymlinks(runtimeDir)
			return
		}

		uid := strconv.Itoa(unshare.GetRootlessUID())
		if runtimeDir == "" {
			tmpDir := filepath.Join("/run", "user", uid)
			if err := os.MkdirAll(tmpDir, 0o700); err != nil {
				logrus.Debug(err)
			}
			st, err := os.Lstat(tmpDir)
			if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) {
				runtimeDir = tmpDir
			}
		}
		if runtimeDir == "" {
			tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("storage-run-%s", uid))
			if err := os.MkdirAll(tmpDir, 0o700); err != nil {
				logrus.Debug(err)
			}
			st, err := os.Lstat(tmpDir)
			if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) {
				runtimeDir = tmpDir
			} else {
				rootlessRuntimeDirError = fmt.Errorf("path %q exists and it is not writeable only by the current user", tmpDir)
				return
			}
		}
		rootlessRuntimeDir = runtimeDir
	})

	return rootlessRuntimeDir, rootlessRuntimeDirError
}
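With this move, homedir.GetRuntimeDir on Linux no longer fails outright when XDG_RUNTIME_DIR is unset: it falls back to /run/user/<uid>, then to storage-run-<uid> under the system temp directory, and caches the result via sync.Once. A sketch of the observable behavior (not a new API, just the relocated one):

	dir, err := homedir.GetRuntimeDir()
	if err != nil {
		// none of XDG_RUNTIME_DIR, /run/user/<uid>, or
		// $TMPDIR/storage-run-<uid> was usable by the current user
		return err
	}
	// dir is now fixed for the life of the process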
2 vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go generated vendored
@@ -17,7 +17,7 @@ const (

// IsRootless tells us if we are running in rootless mode
func IsRootless() bool {
	return false
	return os.Getuid() != 0
}

// GetRootlessUID returns the UID of the user in the parent userNS
3 vendor/github.com/containers/storage/storage.conf generated vendored
@@ -130,6 +130,9 @@ mountopt = "nodev"
# Set to skip a PRIVATE bind mount on the storage home directory.
# skip_mount_home = "false"

# Set to use composefs to mount data layers with overlay.
# use_composefs = "false"

# Size is used to set a maximum size of the container image.
# size = ""
3 vendor/github.com/containers/storage/storage.conf-freebsd generated vendored
@@ -96,6 +96,9 @@ mountopt = "nodev"
# Set to skip a PRIVATE bind mount on the storage home directory.
# skip_mount_home = "false"

# Set to use composefs to mount data layers with overlay.
# use_composefs = "false"

# Size is used to set a maximum size of the container image.
# size = ""
35 vendor/github.com/containers/storage/store.go generated vendored
@@ -1,6 +1,7 @@
package storage

import (
	_ "embed"
	"encoding/base64"
	"errors"
	"fmt"
@@ -10,6 +11,7 @@ import (
	"reflect"
	"strings"
	"sync"
	"syscall"
	"time"

	// register all of the built-in drivers
@@ -313,10 +315,10 @@ type Store interface {
	// ApplyDiffer applies a diff to a layer.
	// It is the caller responsibility to clean the staging directory if it is not
	// successfully applied with ApplyDiffFromStagingDirectory.
	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)
	ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error)

	// ApplyDiffFromStagingDirectory uses stagingDirectory to create the diff.
	ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error
	ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error

	// CleanupStagingDirectory cleanups the staging directory. It can be used to cleanup the staging directory on errors
	CleanupStagingDirectory(stagingDirectory string) error
@@ -332,6 +334,10 @@ type Store interface {
	// specified uncompressed digest value recorded for them.
	LayersByUncompressedDigest(d digest.Digest) ([]Layer, error)

	// LayersByTOCDigest returns a slice of the layers with the
	// specified TOC digest value recorded for them.
	LayersByTOCDigest(d digest.Digest) ([]Layer, error)

	// LayerSize returns a cached approximation of the layer's size, or -1
	// if we don't have a value on hand.
	LayerSize(id string) (int64, error)
@@ -960,6 +966,10 @@ func (s *store) load() error {
	} else {
		ris, err = newROImageStore(gipath)
		if err != nil {
			if errors.Is(err, syscall.EROFS) {
				logrus.Debugf("Ignoring creation of lockfiles on read-only file systems %q, %v", gipath, err)
				continue
			}
			return err
		}
	}
@@ -2741,7 +2751,13 @@ func (s *store) Status() ([][2]string, error) {
	return rlstore.Status()
}

//go:embed VERSION
var storageVersion string

func (s *store) Version() ([][2]string, error) {
	if trimmedVersion := strings.TrimSpace(storageVersion); trimmedVersion != "" {
		return [][2]string{{"Version", trimmedVersion}}, nil
	}
	return [][2]string{}, nil
}

@@ -2915,7 +2931,7 @@ func (s *store) Diff(from, to string, options *DiffOptions) (io.ReadCloser, erro
	return nil, ErrLayerUnknown
}

func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffOpts) error {
func (s *store) ApplyDiffFromStagingDirectory(to, stagingDirectory string, diffOutput *drivers.DriverWithDifferOutput, options *drivers.ApplyDiffWithDifferOpts) error {
	_, err := writeToLayerStore(s, func(rlstore rwLayerStore) (struct{}, error) {
		if !rlstore.Exists(to) {
			return struct{}{}, ErrLayerUnknown
@@ -2932,7 +2948,7 @@ func (s *store) CleanupStagingDirectory(stagingDirectory string) error {
	return err
}

func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
func (s *store) ApplyDiffWithDiffer(to string, options *drivers.ApplyDiffWithDifferOpts, differ drivers.Differ) (*drivers.DriverWithDifferOutput, error) {
	return writeToLayerStore(s, func(rlstore rwLayerStore) (*drivers.DriverWithDifferOutput, error) {
		if to != "" && !rlstore.Exists(to) {
			return nil, ErrLayerUnknown
@@ -2994,6 +3010,13 @@ func (s *store) LayersByUncompressedDigest(d digest.Digest) ([]Layer, error) {
	return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByUncompressedDigest(d) }, d)
}

func (s *store) LayersByTOCDigest(d digest.Digest) ([]Layer, error) {
	if err := d.Validate(); err != nil {
		return nil, fmt.Errorf("looking for TOC matching digest %q: %w", d, err)
	}
	return s.layersByMappedDigest(func(r roLayerStore, d digest.Digest) ([]Layer, error) { return r.LayersByTOCDigest(d) }, d)
}

func (s *store) LayerSize(id string) (int64, error) {
	if res, done, err := readAllLayerStores(s, func(store roLayerStore) (int64, bool, error) {
		if store.Exists(id) {
@@ -3545,8 +3568,8 @@ func SetDefaultConfigFilePath(path string) {
}

// DefaultConfigFile returns the path to the storage config file used
func DefaultConfigFile(rootless bool) (string, error) {
	return types.DefaultConfigFile(rootless)
func DefaultConfigFile() (string, error) {
	return types.DefaultConfigFile()
}

// ReloadConfigurationFile parses the specified configuration file and overrides
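Besides the mechanical ApplyDiffOpts to ApplyDiffWithDifferOpts rename in the differ entry points, store.go gains LayersByTOCDigest, which lets a caller look up already-applied partially-pulled layers by their TOC digest rather than by uncompressed digest. A sketch, reusing the *digest.Digest returned by the toc package above (hedged, not a verbatim call site):

	layers, err := store.LayersByTOCDigest(*tocDigest)
	if err == nil && len(layers) > 0 {
		// reuse layers[0] instead of fetching and applying the blob again
	}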
86 vendor/github.com/containers/storage/types/options.go generated vendored
@@ -11,7 +11,9 @@ import (

	"github.com/BurntSushi/toml"
	cfg "github.com/containers/storage/pkg/config"
	"github.com/containers/storage/pkg/homedir"
	"github.com/containers/storage/pkg/idtools"
	"github.com/containers/storage/pkg/unshare"
	"github.com/sirupsen/logrus"
)

@@ -87,7 +89,7 @@ func loadDefaultStoreOptions() {

	_, err := os.Stat(defaultOverrideConfigFile)
	if err == nil {
		// The DefaultConfigFile(rootless) function returns the path
		// The DefaultConfigFile() function returns the path
		// of the used storage.conf file, by returning defaultConfigFile
		// If override exists containers/storage uses it by default.
		defaultConfigFile = defaultOverrideConfigFile
@@ -109,21 +111,41 @@ func loadDefaultStoreOptions() {
	setDefaults()
}

// defaultStoreOptionsIsolated is an internal implementation detail of DefaultStoreOptions to allow testing.
// Everyone but the tests this is intended for should only call DefaultStoreOptions, never this function.
func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf string) (StoreOptions, error) {
// loadStoreOptions returns the default storage ops for containers
func loadStoreOptions() (StoreOptions, error) {
	storageConf, err := DefaultConfigFile()
	if err != nil {
		return defaultStoreOptions, err
	}
	return loadStoreOptionsFromConfFile(storageConf)
}

// usePerUserStorage returns whether the user private storage must be used.
// We cannot simply use the unshare.IsRootless() condition, because
// that checks only if the current process needs a user namespace to
// work and it would break cases where the process is already created
// in a user namespace (e.g. nested Podman/Buildah) and the desired
// behavior is to use system paths instead of user private paths.
func usePerUserStorage() bool {
	return unshare.IsRootless() && unshare.GetRootlessUID() != 0
}

// loadStoreOptionsFromConfFile is an internal implementation detail of DefaultStoreOptions to allow testing.
// Everyone but the tests this is intended for should only call loadStoreOptions, never this function.
func loadStoreOptionsFromConfFile(storageConf string) (StoreOptions, error) {
	var (
		defaultRootlessRunRoot   string
		defaultRootlessGraphRoot string
		err                      error
	)

	defaultStoreOptionsOnce.Do(loadDefaultStoreOptions)
	if loadDefaultStoreOptionsErr != nil {
		return StoreOptions{}, loadDefaultStoreOptionsErr
	}
	storageOpts := defaultStoreOptions
	if rootless && rootlessUID != 0 {
		storageOpts, err = getRootlessStorageOpts(rootlessUID, storageOpts)
	if usePerUserStorage() {
		storageOpts, err = getRootlessStorageOpts(storageOpts)
		if err != nil {
			return storageOpts, err
		}
@@ -137,7 +159,7 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
	defaultRootlessGraphRoot = storageOpts.GraphRoot
	storageOpts = StoreOptions{}
	reloadConfigurationFileIfNeeded(storageConf, &storageOpts)
	if rootless && rootlessUID != 0 {
	if usePerUserStorage() {
		// If the file did not specify a graphroot or runroot,
		// set sane defaults so we don't try and use root-owned
		// directories
@@ -156,6 +178,7 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
	if storageOpts.RunRoot == "" {
		return storageOpts, fmt.Errorf("runroot must be set")
	}
	rootlessUID := unshare.GetRootlessUID()
	runRoot, err := expandEnvPath(storageOpts.RunRoot, rootlessUID)
	if err != nil {
		return storageOpts, err
@@ -186,26 +209,17 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str
	return storageOpts, nil
}

// loadStoreOptions returns the default storage ops for containers
func loadStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
	storageConf, err := DefaultConfigFile(rootless && rootlessUID != 0)
	if err != nil {
		return defaultStoreOptions, err
	}
	return defaultStoreOptionsIsolated(rootless, rootlessUID, storageConf)
}

// UpdateOptions should be called iff container engine received a SIGHUP,
// otherwise use DefaultStoreOptions
func UpdateStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
	storeOptions, storeError = loadStoreOptions(rootless, rootlessUID)
func UpdateStoreOptions() (StoreOptions, error) {
	storeOptions, storeError = loadStoreOptions()
	return storeOptions, storeError
}

// DefaultStoreOptions returns the default storage ops for containers
func DefaultStoreOptions(rootless bool, rootlessUID int) (StoreOptions, error) {
func DefaultStoreOptions() (StoreOptions, error) {
	once.Do(func() {
		storeOptions, storeError = loadStoreOptions(rootless, rootlessUID)
		storeOptions, storeError = loadStoreOptions()
	})
	return storeOptions, storeError
}
@@ -270,14 +284,26 @@ func isRootlessDriver(driver string) bool {
}

// getRootlessStorageOpts returns the storage opts for containers running as non root
func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOptions) (StoreOptions, error) {
func getRootlessStorageOpts(systemOpts StoreOptions) (StoreOptions, error) {
	var opts StoreOptions

	dataDir, rootlessRuntime, err := getRootlessDirInfo(rootlessUID)
	rootlessUID := unshare.GetRootlessUID()

	dataDir, err := homedir.GetDataHome()
	if err != nil {
		return opts, err
	}
	opts.RunRoot = rootlessRuntime

	rootlessRuntime, err := homedir.GetRuntimeDir()
	if err != nil {
		return opts, err
	}

	opts.RunRoot = filepath.Join(rootlessRuntime, "containers")
	if err := os.MkdirAll(opts.RunRoot, 0o700); err != nil {
		return opts, fmt.Errorf("unable to make rootless runtime: %w", err)
	}

	opts.PullOptions = systemOpts.PullOptions
	if systemOpts.RootlessStoragePath != "" {
		opts.GraphRoot, err = expandEnvPath(systemOpts.RootlessStoragePath, rootlessUID)
@@ -343,12 +369,6 @@ func getRootlessStorageOpts(rootlessUID int, systemOpts StoreOpti
	return opts, nil
}

// DefaultStoreOptionsAutoDetectUID returns the default storage ops for containers
func DefaultStoreOptionsAutoDetectUID() (StoreOptions, error) {
	uid := getRootlessUID()
	return DefaultStoreOptions(uid != 0, uid)
}

var prevReloadConfig = struct {
	storeOptions *StoreOptions
	mod          time.Time
@@ -518,8 +538,8 @@ func Options() (StoreOptions, error) {
}

// Save overwrites the tomlConfig in storage.conf with the given conf
func Save(conf TomlConfig, rootless bool) error {
	configFile, err := DefaultConfigFile(rootless)
func Save(conf TomlConfig) error {
	configFile, err := DefaultConfigFile()
	if err != nil {
		return err
	}
@@ -537,10 +557,10 @@ func Save(conf TomlConfig, rootless bool) error {
}

// StorageConfig is used to retrieve the storage.conf toml in order to overwrite it
func StorageConfig(rootless bool) (*TomlConfig, error) {
func StorageConfig() (*TomlConfig, error) {
	config := new(TomlConfig)

	configFile, err := DefaultConfigFile(rootless)
	configFile, err := DefaultConfigFile()
	if err != nil {
		return nil, err
	}
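The net effect of the options.go rewrite is an API simplification: rootless detection moves inside the package (via unshare.IsRootless and usePerUserStorage), so callers stop threading a (rootless, UID) pair through every call. A migration sketch for downstream code (the "before" line reflects the v1.51-style signature shown above):

	// before (c/storage v1.51):
	//   opts, err := types.DefaultStoreOptions(unshare.IsRootless(), unshare.GetRootlessUID())
	// after (v1.52):
	opts, err := types.DefaultStoreOptions()
	if err != nil {
		return err
	}
	store, err := storage.GetStore(opts)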
151 vendor/github.com/containers/storage/types/utils.go generated vendored
@@ -2,162 +2,15 @@ package types

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/containers/storage/pkg/homedir"
	"github.com/containers/storage/pkg/system"
	"github.com/sirupsen/logrus"
)

// GetRootlessRuntimeDir returns the runtime directory when running as non root
func GetRootlessRuntimeDir(rootlessUID int) (string, error) {
	path, err := getRootlessRuntimeDir(rootlessUID)
	if err != nil {
		return "", err
	}
	path = filepath.Join(path, "containers")
	if err := os.MkdirAll(path, 0o700); err != nil {
		return "", fmt.Errorf("unable to make rootless runtime: %w", err)
	}
	return path, nil
}

type rootlessRuntimeDirEnvironment interface {
	getProcCommandFile() string
	getRunUserDir() string
	getTmpPerUserDir() string

	homeDirGetRuntimeDir() (string, error)
	systemLstat(string) (*system.StatT, error)
	homedirGet() string
}

type rootlessRuntimeDirEnvironmentImplementation struct {
	procCommandFile string
	runUserDir      string
	tmpPerUserDir   string
}

func (env rootlessRuntimeDirEnvironmentImplementation) getProcCommandFile() string {
	return env.procCommandFile
}

func (env rootlessRuntimeDirEnvironmentImplementation) getRunUserDir() string {
	return env.runUserDir
}

func (env rootlessRuntimeDirEnvironmentImplementation) getTmpPerUserDir() string {
	return env.tmpPerUserDir
}

func (rootlessRuntimeDirEnvironmentImplementation) homeDirGetRuntimeDir() (string, error) {
	return homedir.GetRuntimeDir()
}

func (rootlessRuntimeDirEnvironmentImplementation) systemLstat(path string) (*system.StatT, error) {
	return system.Lstat(path)
}

func (rootlessRuntimeDirEnvironmentImplementation) homedirGet() string {
	return homedir.Get()
}

func isRootlessRuntimeDirOwner(dir string, env rootlessRuntimeDirEnvironment) bool {
	st, err := env.systemLstat(dir)
	return err == nil && int(st.UID()) == os.Getuid() && st.Mode()&0o700 == 0o700 && st.Mode()&0o066 == 0o000
}

// getRootlessRuntimeDirIsolated is an internal implementation detail of getRootlessRuntimeDir to allow testing.
// Everyone but the tests this is intended for should only call getRootlessRuntimeDir, never this function.
func getRootlessRuntimeDirIsolated(env rootlessRuntimeDirEnvironment) (string, error) {
	runtimeDir, err := env.homeDirGetRuntimeDir()
	if err == nil {
		return runtimeDir, nil
	}

	initCommand, err := os.ReadFile(env.getProcCommandFile())
	if err != nil || string(initCommand) == "systemd" {
		runUserDir := env.getRunUserDir()
		if isRootlessRuntimeDirOwner(runUserDir, env) {
			return runUserDir, nil
		}
	}

	tmpPerUserDir := env.getTmpPerUserDir()
	if tmpPerUserDir != "" {
		if _, err := env.systemLstat(tmpPerUserDir); os.IsNotExist(err) {
			if err := os.Mkdir(tmpPerUserDir, 0o700); err != nil {
				logrus.Errorf("Failed to create temp directory for user: %v", err)
			} else {
				return tmpPerUserDir, nil
			}
		} else if isRootlessRuntimeDirOwner(tmpPerUserDir, env) {
			return tmpPerUserDir, nil
		}
	}

	homeDir := env.homedirGet()
	if homeDir == "" {
		return "", errors.New("neither XDG_RUNTIME_DIR nor temp dir nor HOME was set non-empty")
	}
	resolvedHomeDir, err := filepath.EvalSymlinks(homeDir)
	if err != nil {
		return "", err
	}
	return filepath.Join(resolvedHomeDir, "rundir"), nil
}

func getRootlessRuntimeDir(rootlessUID int) (string, error) {
	return getRootlessRuntimeDirIsolated(
		rootlessRuntimeDirEnvironmentImplementation{
			"/proc/1/comm",
			fmt.Sprintf("/run/user/%d", rootlessUID),
			fmt.Sprintf("%s/containers-user-%d", os.TempDir(), rootlessUID),
		},
	)
}

// getRootlessDirInfo returns the parent path of where the storage for containers and
// volumes will be in rootless mode
func getRootlessDirInfo(rootlessUID int) (string, string, error) {
	rootlessRuntime, err := GetRootlessRuntimeDir(rootlessUID)
	if err != nil {
		return "", "", err
	}

	dataDir, err := homedir.GetDataHome()
	if err == nil {
		return dataDir, rootlessRuntime, nil
	}

	home := homedir.Get()
	if home == "" {
		return "", "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty: %w", err)
	}
	// runc doesn't like symlinks in the rootfs path, and at least
	// on CoreOS /home is a symlink to /var/home, so resolve any symlink.
	resolvedHome, err := filepath.EvalSymlinks(home)
	if err != nil {
		return "", "", err
	}
	dataDir = filepath.Join(resolvedHome, ".local", "share")

	return dataDir, rootlessRuntime, nil
}

func getRootlessUID() int {
	uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID")
	if uidEnv != "" {
		u, _ := strconv.Atoi(uidEnv)
		return u
	}
	return os.Geteuid()
}

func expandEnvPath(path string, rootlessUID int) (string, error) {
	var err error
	path = strings.Replace(path, "$UID", strconv.Itoa(rootlessUID), -1)
@@ -169,7 +22,7 @@ func expandEnvPath(path string, rootlessUID int) (string, error) {
	return newpath, nil
}

func DefaultConfigFile(rootless bool) (string, error) {
func DefaultConfigFile() (string, error) {
	if defaultConfigFileSet {
		return defaultConfigFile, nil
	}
@@ -177,7 +30,7 @@ func DefaultConfigFile(rootless bool) (string, error) {
	if path, ok := os.LookupEnv(storageConfEnv); ok {
		return path, nil
	}
	if !rootless {
	if !usePerUserStorage() {
		if _, err := os.Stat(defaultOverrideConfigFile); err == nil {
			return defaultOverrideConfigFile, nil
		}
14 vendor/github.com/containers/storage/utils.go generated vendored
@@ -11,19 +11,9 @@ func ParseIDMapping(UIDMapSlice, GIDMapSlice []string, subUIDMap, subGIDMap stri
	return types.ParseIDMapping(UIDMapSlice, GIDMapSlice, subUIDMap, subGIDMap)
}

// GetRootlessRuntimeDir returns the runtime directory when running as non root
func GetRootlessRuntimeDir(rootlessUID int) (string, error) {
	return types.GetRootlessRuntimeDir(rootlessUID)
}

// DefaultStoreOptionsAutoDetectUID returns the default storage options for containers
func DefaultStoreOptionsAutoDetectUID() (types.StoreOptions, error) {
	return types.DefaultStoreOptionsAutoDetectUID()
}

// DefaultStoreOptions returns the default storage options for containers
func DefaultStoreOptions(rootless bool, rootlessUID int) (types.StoreOptions, error) {
	return types.DefaultStoreOptions(rootless, rootlessUID)
func DefaultStoreOptions() (types.StoreOptions, error) {
	return types.DefaultStoreOptions()
}

func validateMountOptions(mountOptions []string) error {
63 vendor/github.com/coreos/go-oidc/v3/oidc/oidc.go generated vendored
@@ -94,12 +94,13 @@ func doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {

// Provider represents an OpenID Connect server's configuration.
type Provider struct {
	issuer      string
	authURL     string
	tokenURL    string
	userInfoURL string
	jwksURL     string
	algorithms  []string
	issuer        string
	authURL       string
	tokenURL      string
	deviceAuthURL string
	userInfoURL   string
	jwksURL       string
	algorithms    []string

	// Raw claims returned by the server.
	rawClaims []byte
@@ -128,12 +129,13 @@ func (p *Provider) remoteKeySet() KeySet {
}

type providerJSON struct {
	Issuer      string   `json:"issuer"`
	AuthURL     string   `json:"authorization_endpoint"`
	TokenURL    string   `json:"token_endpoint"`
	JWKSURL     string   `json:"jwks_uri"`
	UserInfoURL string   `json:"userinfo_endpoint"`
	Algorithms  []string `json:"id_token_signing_alg_values_supported"`
	Issuer        string   `json:"issuer"`
	AuthURL       string   `json:"authorization_endpoint"`
	TokenURL      string   `json:"token_endpoint"`
	DeviceAuthURL string   `json:"device_authorization_endpoint"`
	JWKSURL       string   `json:"jwks_uri"`
	UserInfoURL   string   `json:"userinfo_endpoint"`
	Algorithms    []string `json:"id_token_signing_alg_values_supported"`
}

// supportedAlgorithms is a list of algorithms explicitly supported by this
@@ -165,6 +167,9 @@ type ProviderConfig struct {
	// TokenURL is the endpoint used by the provider to support the OAuth 2.0
	// token endpoint.
	TokenURL string
	// DeviceAuthURL is the endpoint used by the provider to support the OAuth 2.0
	// device authorization endpoint.
	DeviceAuthURL string
	// UserInfoURL is the endpoint used by the provider to support the OpenID
	// Connect UserInfo flow.
	//
@@ -185,13 +190,14 @@ type ProviderConfig struct {
// through discovery.
func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider {
	return &Provider{
		issuer:      p.IssuerURL,
		authURL:     p.AuthURL,
		tokenURL:    p.TokenURL,
		userInfoURL: p.UserInfoURL,
		jwksURL:     p.JWKSURL,
		algorithms:  p.Algorithms,
		client:      getClient(ctx),
		issuer:        p.IssuerURL,
		authURL:       p.AuthURL,
		tokenURL:      p.TokenURL,
		deviceAuthURL: p.DeviceAuthURL,
		userInfoURL:   p.UserInfoURL,
		jwksURL:       p.JWKSURL,
		algorithms:    p.Algorithms,
		client:        getClient(ctx),
	}
}

@@ -240,14 +246,15 @@ func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
		}
	}
	return &Provider{
		issuer:      issuerURL,
		authURL:     p.AuthURL,
		tokenURL:    p.TokenURL,
		userInfoURL: p.UserInfoURL,
		jwksURL:     p.JWKSURL,
		algorithms:  algs,
		rawClaims:   body,
		client:      getClient(ctx),
		issuer:        issuerURL,
		authURL:       p.AuthURL,
		tokenURL:      p.TokenURL,
		deviceAuthURL: p.DeviceAuthURL,
		userInfoURL:   p.UserInfoURL,
		jwksURL:       p.JWKSURL,
		algorithms:    algs,
		rawClaims:     body,
		client:        getClient(ctx),
	}, nil
}

@@ -273,7 +280,7 @@ func (p *Provider) Claims(v interface{}) error {

// Endpoint returns the OAuth2 auth and token endpoints for the given provider.
func (p *Provider) Endpoint() oauth2.Endpoint {
	return oauth2.Endpoint{AuthURL: p.authURL, TokenURL: p.tokenURL}
	return oauth2.Endpoint{AuthURL: p.authURL, DeviceAuthURL: p.deviceAuthURL, TokenURL: p.tokenURL}
}

// UserInfoEndpoint returns the OpenID Connect userinfo endpoint for the given
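go-oidc v3.9.0 records the provider's device_authorization_endpoint during discovery, so the oauth2.Endpoint returned by Provider.Endpoint() can now drive the OAuth 2.0 device flow. A hedged sketch built on golang.org/x/oauth2 (issuerURL and clientID are placeholders, not values from this diff):

	provider, err := oidc.NewProvider(ctx, issuerURL)
	if err != nil {
		return err
	}
	conf := &oauth2.Config{ClientID: clientID, Endpoint: provider.Endpoint()}
	// DeviceAuth succeeds only when the provider advertised a device endpoint.
	resp, err := conf.DeviceAuth(ctx)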
34 vendor/github.com/docker/distribution/reference/helpers_deprecated.go generated vendored
@@ -1,34 +0,0 @@
package reference

import "github.com/distribution/reference"

// IsNameOnly returns true if reference only contains a repo name.
//
// Deprecated: use [reference.IsNameOnly].
func IsNameOnly(ref reference.Named) bool {
	return reference.IsNameOnly(ref)
}

// FamiliarName returns the familiar name string
// for the given named, familiarizing if needed.
//
// Deprecated: use [reference.FamiliarName].
func FamiliarName(ref reference.Named) string {
	return reference.FamiliarName(ref)
}

// FamiliarString returns the familiar string representation
// for the given reference, familiarizing if needed.
//
// Deprecated: use [reference.FamiliarString].
func FamiliarString(ref reference.Reference) string {
	return reference.FamiliarString(ref)
}

// FamiliarMatch reports whether ref matches the specified pattern.
// See [path.Match] for supported patterns.
//
// Deprecated: use [reference.FamiliarMatch].
func FamiliarMatch(pattern string, ref reference.Reference) (bool, error) {
	return reference.FamiliarMatch(pattern, ref)
}
92 vendor/github.com/docker/distribution/reference/normalize_deprecated.go generated vendored
@@ -1,92 +0,0 @@
package reference

import (
	"regexp"

	"github.com/distribution/reference"
	"github.com/opencontainers/go-digest"
	"github.com/opencontainers/go-digest/digestset"
)

// ParseNormalizedNamed parses a string into a named reference
// transforming a familiar name from Docker UI to a fully
// qualified reference. If the value may be an identifier
// use ParseAnyReference.
//
// Deprecated: use [reference.ParseNormalizedNamed].
func ParseNormalizedNamed(s string) (reference.Named, error) {
	return reference.ParseNormalizedNamed(s)
}

// ParseDockerRef normalizes the image reference following the docker convention,
// which allows for references to contain both a tag and a digest.
//
// Deprecated: use [reference.ParseDockerRef].
func ParseDockerRef(ref string) (reference.Named, error) {
	return reference.ParseDockerRef(ref)
}

// TagNameOnly adds the default tag "latest" to a reference if it only has
// a repo name.
//
// Deprecated: use [reference.TagNameOnly].
func TagNameOnly(ref reference.Named) reference.Named {
	return reference.TagNameOnly(ref)
}

// ParseAnyReference parses a reference string as a possible identifier,
// full digest, or familiar name.
//
// Deprecated: use [reference.ParseAnyReference].
func ParseAnyReference(ref string) (reference.Reference, error) {
	return reference.ParseAnyReference(ref)
}

// Functions and types below have been removed in distribution v3 and
// have not been ported to github.com/distribution/reference. See
// https://github.com/distribution/distribution/pull/3774

var (
	// ShortIdentifierRegexp is the format used to represent a prefix
	// of an identifier. A prefix may be used to match a sha256 identifier
	// within a list of trusted identifiers.
	//
	// Deprecated: support for short-identifiers is deprecated, and will be removed in v3.
	ShortIdentifierRegexp = regexp.MustCompile(shortIdentifier)

	shortIdentifier = `([a-f0-9]{6,64})`

	// anchoredShortIdentifierRegexp is used to check if a value
	// is a possible identifier prefix, anchored at start and end
	// of string.
	anchoredShortIdentifierRegexp = regexp.MustCompile(`^` + shortIdentifier + `$`)
)

type digestReference digest.Digest

func (d digestReference) String() string {
	return digest.Digest(d).String()
}

func (d digestReference) Digest() digest.Digest {
	return digest.Digest(d)
}

// ParseAnyReferenceWithSet parses a reference string as a possible short
// identifier to be matched in a digest set, a full digest, or familiar name.
//
// Deprecated: support for short-identifiers is deprecated, and will be removed in v3.
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
	if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
		dgst, err := ds.Lookup(ref)
		if err == nil {
			return digestReference(dgst), nil
		}
	} else {
		if dgst, err := digest.Parse(ref); err == nil {
			return digestReference(dgst), nil
		}
	}

	return reference.ParseNormalizedNamed(ref)
}
172 vendor/github.com/docker/distribution/reference/reference_deprecated.go generated vendored
@@ -1,172 +0,0 @@
// Package reference is deprecated, and has moved to github.com/distribution/reference.
//
// Deprecated: use github.com/distribution/reference instead.
package reference

import (
	"github.com/distribution/reference"
	"github.com/opencontainers/go-digest"
)

const (
	// NameTotalLengthMax is the maximum total number of characters in a repository name.
	//
	// Deprecated: use [reference.NameTotalLengthMax].
	NameTotalLengthMax = reference.NameTotalLengthMax
)

var (
	// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
	//
	// Deprecated: use [reference.ErrReferenceInvalidFormat].
	ErrReferenceInvalidFormat = reference.ErrReferenceInvalidFormat

	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
	//
	// Deprecated: use [reference.ErrTagInvalidFormat].
	ErrTagInvalidFormat = reference.ErrTagInvalidFormat

	// ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
	//
	// Deprecated: use [reference.ErrDigestInvalidFormat].
	ErrDigestInvalidFormat = reference.ErrDigestInvalidFormat

	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
	//
	// Deprecated: use [reference.ErrNameContainsUppercase].
	ErrNameContainsUppercase = reference.ErrNameContainsUppercase

	// ErrNameEmpty is returned for empty, invalid repository names.
	//
	// Deprecated: use [reference.ErrNameEmpty].
	ErrNameEmpty = reference.ErrNameEmpty

	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
	//
	// Deprecated: use [reference.ErrNameTooLong].
	ErrNameTooLong = reference.ErrNameTooLong

	// ErrNameNotCanonical is returned when a name is not canonical.
	//
	// Deprecated: use [reference.ErrNameNotCanonical].
	ErrNameNotCanonical = reference.ErrNameNotCanonical
)

// Reference is an opaque object reference identifier that may include
// modifiers such as a hostname, name, tag, and digest.
//
// Deprecated: use [reference.Reference].
type Reference = reference.Reference

// Field provides a wrapper type for resolving correct reference types when
// working with encoding.
//
// Deprecated: use [reference.Field].
type Field = reference.Field

// AsField wraps a reference in a Field for encoding.
//
// Deprecated: use [reference.AsField].
func AsField(ref reference.Reference) reference.Field {
	return reference.AsField(ref)
}

// Named is an object with a full name
//
// Deprecated: use [reference.Named].
type Named = reference.Named

// Tagged is an object which has a tag
//
// Deprecated: use [reference.Tagged].
type Tagged = reference.Tagged

// NamedTagged is an object including a name and tag.
//
// Deprecated: use [reference.NamedTagged].
type NamedTagged = reference.NamedTagged

// Digested is an object which has a digest
// in which it can be referenced by
//
// Deprecated: use [reference.Digested].
type Digested = reference.Digested

// Canonical reference is an object with a fully unique
// name including a name with domain and digest
//
// Deprecated: use [reference.Canonical].
type Canonical = reference.Canonical

// Domain returns the domain part of the [Named] reference.
//
// Deprecated: use [reference.Domain].
func Domain(named reference.Named) string {
	return reference.Domain(named)
}

// Path returns the name without the domain part of the [Named] reference.
//
// Deprecated: use [reference.Path].
func Path(named reference.Named) (name string) {
	return reference.Path(named)
}

// SplitHostname splits a named reference into a
// hostname and name string. If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name
//
// Deprecated: Use [reference.Domain] or [reference.Path].
func SplitHostname(named reference.Named) (string, string) {
	return reference.SplitHostname(named)
}

// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
//
// Deprecated: use [reference.Parse].
func Parse(s string) (reference.Reference, error) {
	return reference.Parse(s)
}

// ParseNamed parses s and returns a syntactically valid reference implementing
// the Named interface. The reference must have a name and be in the canonical
// form, otherwise an error is returned.
// If an error was encountered it is returned, along with a nil Reference.
//
// Deprecated: use [reference.ParseNamed].
func ParseNamed(s string) (reference.Named, error) {
	return reference.ParseNamed(s)
}

// WithName returns a named object representing the given string. If the input
// is invalid ErrReferenceInvalidFormat will be returned.
//
// Deprecated: use [reference.WithName].
func WithName(name string) (reference.Named, error) {
	return reference.WithName(name)
}

// WithTag combines the name from "name" and the tag from "tag" to form a
// reference incorporating both the name and the tag.
//
// Deprecated: use [reference.WithTag].
func WithTag(name reference.Named, tag string) (reference.NamedTagged, error) {
	return reference.WithTag(name, tag)
}

// WithDigest combines the name from "name" and the digest from "digest" to form
// a reference incorporating both the name and the digest.
//
// Deprecated: use [reference.WithDigest].
func WithDigest(name reference.Named, digest digest.Digest) (reference.Canonical, error) {
	return reference.WithDigest(name, digest)
}

// TrimNamed removes any tag or digest from the named reference.
//
// Deprecated: use [reference.TrimNamed].
func TrimNamed(ref reference.Named) reference.Named {
	return reference.TrimNamed(ref)
}
50
vendor/github.com/docker/distribution/reference/regexp_deprecated.go
generated
vendored
50
vendor/github.com/docker/distribution/reference/regexp_deprecated.go
generated
vendored
@@ -1,50 +0,0 @@
package reference

import (
	"github.com/distribution/reference"
)

// DigestRegexp matches well-formed digests, including algorithm (e.g. "sha256:<encoded>").
//
// Deprecated: use [reference.DigestRegexp].
var DigestRegexp = reference.DigestRegexp

// DomainRegexp matches hostname or IP-addresses, optionally including a port
// number. It defines the structure of potential domain components that may be
// part of image names. This is purposely a subset of what is allowed by DNS to
// ensure backwards compatibility with Docker image names. It may be a subset of
// DNS domain name, an IPv4 address in decimal format, or an IPv6 address between
// square brackets (excluding zone identifiers as defined by [RFC 6874] or special
// addresses such as IPv4-Mapped).
//
// Deprecated: use [reference.DomainRegexp].
//
// [RFC 6874]: https://www.rfc-editor.org/rfc/rfc6874.
var DomainRegexp = reference.DomainRegexp

// IdentifierRegexp is the format for string identifier used as a
// content addressable identifier using sha256. These identifiers
// are like digests without the algorithm, since sha256 is used.
//
// Deprecated: use [reference.IdentifierRegexp].
var IdentifierRegexp = reference.IdentifierRegexp

// NameRegexp is the format for the name component of references, including
// an optional domain and port, but without tag or digest suffix.
//
// Deprecated: use [reference.NameRegexp].
var NameRegexp = reference.NameRegexp

// ReferenceRegexp is the full supported format of a reference. The regexp
// is anchored and has capturing groups for name, tag, and digest
// components.
//
// Deprecated: use [reference.ReferenceRegexp].
var ReferenceRegexp = reference.ReferenceRegexp

// TagRegexp matches valid tag names. From [docker/docker:graph/tags.go].
//
// Deprecated: use [reference.TagRegexp].
//
// [docker/docker:graph/tags.go]: https://github.com/moby/moby/blob/v1.6.0/graph/tags.go#L26-L28
var TagRegexp = reference.TagRegexp
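For illustration (not part of the commit): the exported regexps behave the same under the new module, and `ReferenceRegexp` in particular is anchored with three capturing groups. A small sketch, using a made-up registry host:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/distribution/reference"
)

func main() {
	ref := "registry.example.com:5000/team/app:v1.2@sha256:" + strings.Repeat("a", 64)
	// Submatches 1-3 are the name, tag, and digest components.
	if m := reference.ReferenceRegexp.FindStringSubmatch(ref); m != nil {
		fmt.Println("name:  ", m[1]) // registry.example.com:5000/team/app
		fmt.Println("tag:   ", m[2]) // v1.2
		fmt.Println("digest:", m[3]) // sha256:aaaa...
	}
}
```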
10 vendor/github.com/docker/distribution/reference/sort_deprecated.go generated vendored
@@ -1,10 +0,0 @@
package reference

import "github.com/distribution/reference"

// Sort sorts string references preferring higher information references.
//
// Deprecated: use [reference.Sort].
func Sort(references []string) []string {
	return reference.Sort(references)
}
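Again for context only: `Sort` orders a slice so that more fully specified references come first, which is unchanged by the move to the new module. A hedged usage sketch (the exact ordering is the library's, not asserted here):

```go
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	refs := []string{
		"alpine",
		"alpine:3.19",
		"docker.io/library/alpine:3.19",
	}
	// References carrying more information are preferred (sorted toward the front).
	for _, r := range reference.Sort(refs) {
		fmt.Println(r)
	}
}
```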
17 vendor/github.com/docker/docker-credential-helpers/client/client.go generated vendored
@@ -16,11 +16,9 @@ func isValidCredsMessage(msg string) error {
 	if credentials.IsCredentialsMissingServerURLMessage(msg) {
 		return credentials.NewErrCredentialsMissingServerURL()
 	}
-
 	if credentials.IsCredentialsMissingUsernameMessage(msg) {
 		return credentials.NewErrCredentialsMissingUsername()
 	}
-
 	return nil
 }

@@ -36,13 +34,10 @@ func Store(program ProgramFunc, creds *credentials.Credentials) error {

 	out, err := cmd.Output()
 	if err != nil {
-		t := strings.TrimSpace(string(out))
-
-		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+		if isValidErr := isValidCredsMessage(string(out)); isValidErr != nil {
 			err = isValidErr
 		}
-
-		return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t)
+		return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out)))
 	}

 	return nil
@@ -55,17 +50,15 @@ func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error

 	out, err := cmd.Output()
 	if err != nil {
-		t := strings.TrimSpace(string(out))
-
-		if credentials.IsErrCredentialsNotFoundMessage(t) {
+		if credentials.IsErrCredentialsNotFoundMessage(string(out)) {
 			return nil, credentials.NewErrCredentialsNotFound()
 		}
-
-		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+		if isValidErr := isValidCredsMessage(string(out)); isValidErr != nil {
 			err = isValidErr
 		}
-
-		return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t)
+		return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, strings.TrimSpace(string(out)))
 	}

 	resp := &credentials.Credentials{
11 vendor/github.com/docker/docker-credential-helpers/credentials/error.go generated vendored
@@ -1,6 +1,9 @@
 package credentials

-import "errors"
+import (
+	"errors"
+	"strings"
+)

 const (
 	// ErrCredentialsNotFound standardizes the not found error, so every helper returns
@@ -47,7 +50,7 @@ func IsErrCredentialsNotFound(err error) bool {
 // This function helps to check messages returned by an
 // external program via its standard output.
 func IsErrCredentialsNotFoundMessage(err string) bool {
-	return err == errCredentialsNotFoundMessage
+	return strings.TrimSpace(err) == errCredentialsNotFoundMessage
 }

 // errCredentialsMissingServerURL represents an error raised
@@ -104,7 +107,7 @@ func IsCredentialsMissingServerURL(err error) bool {
 // IsCredentialsMissingServerURLMessage checks for an
 // errCredentialsMissingServerURL in the error message.
 func IsCredentialsMissingServerURLMessage(err string) bool {
-	return err == errCredentialsMissingServerURLMessage
+	return strings.TrimSpace(err) == errCredentialsMissingServerURLMessage
 }

 // IsCredentialsMissingUsername returns true if the error
@@ -117,5 +120,5 @@ func IsCredentialsMissingUsername(err error) bool {
 // IsCredentialsMissingUsernameMessage checks for an
 // errCredentialsMissingUsername in the error message.
 func IsCredentialsMissingUsernameMessage(err string) bool {
-	return err == errCredentialsMissingUsernameMessage
+	return strings.TrimSpace(err) == errCredentialsMissingUsernameMessage
 }
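The net effect of the two files above: whitespace trimming moved from the client into the message predicates themselves, so raw helper output with a trailing newline now matches. A sketch of the new behavior; the literal message string mirrors the helpers' well-known "credentials not found" text and is my assumption here:

```go
package main

import (
	"fmt"

	"github.com/docker/docker-credential-helpers/credentials"
)

func main() {
	// Helper stdout typically ends with a newline; as of v0.8.1 the
	// predicate trims it before comparing against the canonical message.
	raw := "credentials not found in native keychain\n"
	fmt.Println(credentials.IsErrCredentialsNotFoundMessage(raw)) // expected: true
}
```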
49 vendor/github.com/docker/docker/AUTHORS generated vendored
@@ -27,6 +27,7 @@ Adam Miller <admiller@redhat.com>
Adam Mills <adam@armills.info>
Adam Pointer <adam.pointer@skybettingandgaming.com>
Adam Singer <financeCoding@gmail.com>
Adam Thornton <adam.thornton@maryville.com>
Adam Walz <adam@adamwalz.net>
Adam Williams <awilliams@mirantis.com>
AdamKorcz <adam@adalogics.com>
@@ -173,6 +174,7 @@ Andy Rothfusz <github@developersupport.net>
Andy Smith <github@anarkystic.com>
Andy Wilson <wilson.andrew.j+github@gmail.com>
Andy Zhang <andy.zhangtao@hotmail.com>
Aneesh Kulkarni <askthefactorcamera@gmail.com>
Anes Hasicic <anes.hasicic@gmail.com>
Angel Velazquez <angelcar@amazon.com>
Anil Belur <askb23@gmail.com>
@@ -236,6 +238,7 @@ Ben Golub <ben.golub@dotcloud.com>
Ben Gould <ben@bengould.co.uk>
Ben Hall <ben@benhall.me.uk>
Ben Langfeld <ben@langfeld.me>
Ben Lovy <ben@deciduously.com>
Ben Sargent <ben@brokendigits.com>
Ben Severson <BenSeverson@users.noreply.github.com>
Ben Toews <mastahyeti@gmail.com>
@@ -262,7 +265,7 @@ Billy Ridgway <wrridgwa@us.ibm.com>
Bily Zhang <xcoder@tenxcloud.com>
Bin Liu <liubin0329@gmail.com>
Bingshen Wang <bingshen.wbs@alibaba-inc.com>
Bjorn Neergaard <bneergaard@mirantis.com>
Bjorn Neergaard <bjorn@neersighted.com>
Blake Geno <blakegeno@gmail.com>
Boaz Shuster <ripcurld.github@gmail.com>
bobby abbott <ttobbaybbob@gmail.com>
@@ -279,6 +282,7 @@ Brandon Liu <bdon@bdon.org>
Brandon Philips <brandon.philips@coreos.com>
Brandon Rhodes <brandon@rhodesmill.org>
Brendan Dixon <brendand@microsoft.com>
Brennan Kinney <5098581+polarathene@users.noreply.github.com>
Brent Salisbury <brent.salisbury@docker.com>
Brett Higgins <brhiggins@arbor.net>
Brett Kochendorfer <brett.kochendorfer@gmail.com>
@@ -363,6 +367,7 @@ chenyuzhu <chenyuzhi@oschina.cn>
Chetan Birajdar <birajdar.chetan@gmail.com>
Chewey <prosto-chewey@users.noreply.github.com>
Chia-liang Kao <clkao@clkao.org>
Chiranjeevi Tirunagari <vchiranjeeviak.tirunagari@gmail.com>
chli <chli@freewheel.tv>
Cholerae Hu <choleraehyq@gmail.com>
Chris Alfonso <calfonso@redhat.com>
@@ -433,8 +438,8 @@ Cristian Staretu <cristian.staretu@gmail.com>
cristiano balducci <cristiano.balducci@gmail.com>
Cristina Yenyxe Gonzalez Garcia <cristina.yenyxe@gmail.com>
Cruceru Calin-Cristian <crucerucalincristian@gmail.com>
cui fliter <imcusg@gmail.com>
CUI Wei <ghostplant@qq.com>
cuishuang <imcusg@gmail.com>
Cuong Manh Le <cuong.manhle.vn@gmail.com>
Cyprian Gracz <cyprian.gracz@micro-jumbo.eu>
Cyril F <cyrilf7x@gmail.com>
@@ -513,6 +518,7 @@ David Dooling <dooling@gmail.com>
David Gageot <david@gageot.net>
David Gebler <davidgebler@gmail.com>
David Glasser <glasser@davidglasser.net>
David Karlsson <35727626+dvdksn@users.noreply.github.com>
David Lawrence <david.lawrence@docker.com>
David Lechner <david@lechnology.com>
David M. Karr <davidmichaelkarr@gmail.com>
@@ -602,6 +608,7 @@ Donald Huang <don.hcd@gmail.com>
Dong Chen <dongluo.chen@docker.com>
Donghwa Kim <shanytt@gmail.com>
Donovan Jones <git@gamma.net.nz>
Dorin Geman <dorin.geman@docker.com>
Doron Podoleanu <doronp@il.ibm.com>
Doug Davis <dug@us.ibm.com>
Doug MacEachern <dougm@vmware.com>
@@ -636,6 +643,7 @@ Emily Rose <emily@contactvibe.com>
Emir Ozer <emirozer@yandex.com>
Eng Zer Jun <engzerjun@gmail.com>
Enguerran <engcolson@gmail.com>
Enrico Weigelt, metux IT consult <info@metux.net>
Eohyung Lee <liquidnuker@gmail.com>
epeterso <epeterson@breakpoint-labs.com>
er0k <er0k@er0k.net>
@@ -676,6 +684,7 @@ Evan Allrich <evan@unguku.com>
Evan Carmi <carmi@users.noreply.github.com>
Evan Hazlett <ejhazlett@gmail.com>
Evan Krall <krall@yelp.com>
Evan Lezar <elezar@nvidia.com>
Evan Phoenix <evan@fallingsnow.net>
Evan Wies <evan@neomantra.net>
Evelyn Xu <evelynhsu21@gmail.com>
@@ -744,6 +753,7 @@ Frank Groeneveld <frank@ivaldi.nl>
Frank Herrmann <fgh@4gh.tv>
Frank Macreery <frank@macreery.com>
Frank Rosquin <frank.rosquin+github@gmail.com>
Frank Villaro-Dixon <frank.villarodixon@merkle.com>
Frank Yang <yyb196@gmail.com>
Fred Lifton <fred.lifton@docker.com>
Frederick F. Kautz IV <fkautz@redhat.com>
@@ -983,6 +993,7 @@ Jean Rouge <rougej+github@gmail.com>
Jean-Baptiste Barth <jeanbaptiste.barth@gmail.com>
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
Jean-Christophe Berthon <huygens@berthon.eu>
Jean-Michel Rouet <jm.rouet@gmail.com>
Jean-Paul Calderone <exarkun@twistedmatrix.com>
Jean-Pierre Huynh <jean-pierre.huynh@ounet.fr>
Jean-Tiare Le Bigot <jt@yadutaf.fr>
@@ -1013,6 +1024,7 @@ Jeroen Jacobs <github@jeroenj.be>
Jesse Dearing <jesse.dearing@gmail.com>
Jesse Dubay <jesse@thefortytwo.net>
Jessica Frazelle <jess@oxide.computer>
Jeyanthinath Muthuram <jeyanthinath10@gmail.com>
Jezeniel Zapanta <jpzapanta22@gmail.com>
Jhon Honce <jhonce@redhat.com>
Ji.Zhilong <zhilongji@gmail.com>
@@ -1141,6 +1153,7 @@ junxu <xujun@cmss.chinamobile.com>
Jussi Nummelin <jussi.nummelin@gmail.com>
Justas Brazauskas <brazauskasjustas@gmail.com>
Justen Martin <jmart@the-coder.com>
Justin Chadwell <me@jedevc.com>
Justin Cormack <justin.cormack@docker.com>
Justin Force <justin.force@gmail.com>
Justin Keller <85903732+jk-vb@users.noreply.github.com>
@@ -1183,6 +1196,7 @@ Ke Xu <leonhartx.k@gmail.com>
Kei Ohmura <ohmura.kei@gmail.com>
Keith Hudgins <greenman@greenman.org>
Keli Hu <dev@keli.hu>
Ken Bannister <kb2ma@runbox.com>
Ken Cochrane <kencochrane@gmail.com>
Ken Herner <kherner@progress.com>
Ken ICHIKAWA <ichikawa.ken@jp.fujitsu.com>
@@ -1192,7 +1206,7 @@ Kenjiro Nakayama <nakayamakenjiro@gmail.com>
Kent Johnson <kentoj@gmail.com>
Kenta Tada <Kenta.Tada@sony.com>
Kevin "qwazerty" Houdebert <kevin.houdebert@gmail.com>
Kevin Alvarez <crazy-max@users.noreply.github.com>
Kevin Alvarez <github@crazymax.dev>
Kevin Burke <kev@inburke.com>
Kevin Clark <kevin.clark@gmail.com>
Kevin Feyrer <kevin.feyrer@btinternet.com>
@@ -1225,6 +1239,7 @@ Konstantin Gribov <grossws@gmail.com>
Konstantin L <sw.double@gmail.com>
Konstantin Pelykh <kpelykh@zettaset.com>
Kostadin Plachkov <k.n.plachkov@gmail.com>
kpcyrd <git@rxv.cc>
Krasi Georgiev <krasi@vip-consult.solutions>
Krasimir Georgiev <support@vip-consult.co.uk>
Kris-Mikael Krister <krismikael@protonmail.com>
@@ -1306,6 +1321,7 @@ Lorenzo Fontana <fontanalorenz@gmail.com>
Lotus Fenn <fenn.lotus@gmail.com>
Louis Delossantos <ldelossa.ld@gmail.com>
Louis Opter <kalessin@kalessin.fr>
Luboslav Pivarc <lpivarc@redhat.com>
Luca Favatella <luca.favatella@erlang-solutions.com>
Luca Marturana <lucamarturana@gmail.com>
Luca Orlandi <luca.orlandi@gmail.com>
@@ -1344,6 +1360,7 @@ Manuel Meurer <manuel@krautcomputing.com>
Manuel Rüger <manuel@rueg.eu>
Manuel Woelker <github@manuel.woelker.org>
mapk0y <mapk0y@gmail.com>
Marat Radchenko <marat@slonopotamus.org>
Marc Abramowitz <marc@marc-abramowitz.com>
Marc Kuo <kuomarc2@gmail.com>
Marc Tamsky <mtamsky@gmail.com>
@@ -1383,6 +1400,7 @@ Martijn van Oosterhout <kleptog@svana.org>
Martin Braun <braun@neuroforge.de>
Martin Dojcak <martin.dojcak@lablabs.io>
Martin Honermeyer <maze@strahlungsfrei.de>
Martin Jirku <martin@jirku.sk>
Martin Kelly <martin@surround.io>
Martin Mosegaard Amdisen <martin.amdisen@praqma.com>
Martin Muzatko <martin@happy-css.com>
@@ -1461,6 +1479,7 @@ Michael Holzheu <holzheu@linux.vnet.ibm.com>
Michael Hudson-Doyle <michael.hudson@canonical.com>
Michael Huettermann <michael@huettermann.net>
Michael Irwin <mikesir87@gmail.com>
Michael Kebe <michael.kebe@hkm.de>
Michael Kuehn <micha@kuehn.io>
Michael Käufl <docker@c.michael-kaeufl.de>
Michael Neale <michael.neale@gmail.com>
@@ -1509,10 +1528,11 @@ Mike Lundy <mike@fluffypenguin.org>
Mike MacCana <mike.maccana@gmail.com>
Mike Naberezny <mike@naberezny.com>
Mike Snitzer <snitzer@redhat.com>
Mike Sul <mike.sul@foundries.io>
mikelinjie <294893458@qq.com>
Mikhail Sobolev <mss@mawhrin.net>
Miklos Szegedi <miklos.szegedi@cloudera.com>
Milas Bowman <milasb@gmail.com>
Milas Bowman <devnull@milas.dev>
Milind Chawre <milindchawre@gmail.com>
Miloslav Trmač <mitr@redhat.com>
mingqing <limingqing@cyou-inc.com>
@@ -1524,6 +1544,7 @@ mlarcher <github@ringabell.org>
Mohammad Banikazemi <MBanikazemi@gmail.com>
Mohammad Nasirifar <farnasirim@gmail.com>
Mohammed Aaqib Ansari <maaquib@gmail.com>
Mohd Sadiq <mohdsadiq058@gmail.com>
Mohit Soni <mosoni@ebay.com>
Moorthy RS <rsmoorthy@gmail.com>
Morgan Bauer <mbauer@us.ibm.com>
@@ -1606,6 +1627,7 @@ Noah Treuhaft <noah.treuhaft@docker.com>
NobodyOnSE <ich@sektor.selfip.com>
noducks <onemannoducks@gmail.com>
Nolan Darilek <nolan@thewordnerd.info>
Nolan Miles <nolanpmiles@gmail.com>
Noriki Nakamura <noriki.nakamura@miraclelinux.com>
nponeccop <andy.melnikov@gmail.com>
Nurahmadie <nurahmadie@gmail.com>
@@ -1661,6 +1683,7 @@ Paul Lietar <paul@lietar.net>
Paul Liljenberg <liljenberg.paul@gmail.com>
Paul Morie <pmorie@gmail.com>
Paul Nasrat <pnasrat@gmail.com>
Paul Seiffert <paul.seiffert@jimdo.com>
Paul Weaver <pauweave@cisco.com>
Paulo Gomes <pjbgf@linux.com>
Paulo Ribeiro <paigr.io@gmail.com>
@@ -1674,6 +1697,7 @@ Pavlos Ratis <dastergon@gentoo.org>
Pavol Vargovcik <pallly.vargovcik@gmail.com>
Pawel Konczalski <mail@konczalski.de>
Paweł Gronowski <pawel.gronowski@docker.com>
payall4u <payall4u@qq.com>
Peeyush Gupta <gpeeyush@linux.vnet.ibm.com>
Peggy Li <peggyli.224@gmail.com>
Pei Su <sillyousu@gmail.com>
@@ -1703,7 +1727,9 @@ Phil Estes <estesp@gmail.com>
Phil Sphicas <phil.sphicas@att.com>
Phil Spitler <pspitler@gmail.com>
Philip Alexander Etling <paetling@gmail.com>
Philip K. Warren <pkwarren@gmail.com>
Philip Monroe <phil@philmonroe.com>
Philipp Fruck <dev@p-fruck.de>
Philipp Gillé <philipp.gille@gmail.com>
Philipp Wahala <philipp.wahala@gmail.com>
Philipp Weissensteiner <mail@philippweissensteiner.com>
@@ -1741,6 +1767,7 @@ Quentin Brossard <qbrossard@gmail.com>
Quentin Perez <qperez@ocs.online.net>
Quentin Tayssier <qtayssier@gmail.com>
r0n22 <cameron.regan@gmail.com>
Rachit Sharma <rachitsharma613@gmail.com>
Radostin Stoyanov <rstoyanov1@gmail.com>
Rafal Jeczalik <rjeczalik@gmail.com>
Rafe Colton <rafael.colton@gmail.com>
@@ -1773,6 +1800,7 @@ Rich Horwood <rjhorwood@apple.com>
Rich Moyse <rich@moyse.us>
Rich Seymour <rseymour@gmail.com>
Richard Burnison <rburnison@ebay.com>
Richard Hansen <rhansen@rhansen.org>
Richard Harvey <richard@squarecows.com>
Richard Mathie <richard.mathie@amey.co.uk>
Richard Metzler <richard@paadee.com>
@@ -1788,6 +1816,7 @@ Ritesh H Shukla <sritesh@vmware.com>
Riyaz Faizullabhoy <riyaz.faizullabhoy@docker.com>
Rob Cowsill <42620235+rcowsill@users.noreply.github.com>
Rob Gulewich <rgulewich@netflix.com>
Rob Murray <rob.murray@docker.com>
Rob Vesse <rvesse@dotnetrdf.org>
Robert Bachmann <rb@robertbachmann.at>
Robert Bittle <guywithnose@gmail.com>
@@ -1869,6 +1898,7 @@ ryancooper7 <ryan.cooper7@gmail.com>
RyanDeng <sheldon.d1018@gmail.com>
Ryo Nakao <nakabonne@gmail.com>
Ryoga Saito <contact@proelbtn.com>
Régis Behmo <regis@behmo.com>
Rémy Greinhofer <remy.greinhofer@livelovely.com>
s. rannou <mxs@sbrk.org>
Sabin Basyal <sabin.basyal@gmail.com>
@@ -1885,6 +1915,7 @@ Sam J Sharpe <sam.sharpe@digital.cabinet-office.gov.uk>
Sam Neirinck <sam@samneirinck.com>
Sam Reis <sreis@atlassian.com>
Sam Rijs <srijs@airpost.net>
Sam Thibault <sam.thibault@docker.com>
Sam Whited <sam@samwhited.com>
Sambuddha Basu <sambuddhabasu1@gmail.com>
Sami Wagiaalla <swagiaal@redhat.com>
@@ -1908,6 +1939,7 @@ Satoshi Tagomori <tagomoris@gmail.com>
Scott Bessler <scottbessler@gmail.com>
Scott Collier <emailscottcollier@gmail.com>
Scott Johnston <scott@docker.com>
Scott Moser <smoser@brickies.net>
Scott Percival <scottp@lastyard.com>
Scott Stamp <scottstamp851@gmail.com>
Scott Walls <sawalls@umich.edu>
@@ -1923,6 +1955,7 @@ Sebastiaan van Steenis <mail@superseb.nl>
Sebastiaan van Stijn <github@gone.nl>
Sebastian Höffner <sebastian.hoeffner@mevis.fraunhofer.de>
Sebastian Radloff <sradloff23@gmail.com>
Sebastian Thomschke <sebthom@users.noreply.github.com>
Sebastien Goasguen <runseb@gmail.com>
Senthil Kumar Selvaraj <senthil.thecoder@gmail.com>
Senthil Kumaran <senthil@uthcode.com>
@@ -1996,6 +2029,7 @@ Stanislav Bondarenko <stanislav.bondarenko@gmail.com>
Stanislav Levin <slev@altlinux.org>
Steeve Morin <steeve.morin@gmail.com>
Stefan Berger <stefanb@linux.vnet.ibm.com>
Stefan Gehrig <stefan.gehrig.hn@googlemail.com>
Stefan J. Wernli <swernli@microsoft.com>
Stefan Praszalowicz <stefan@greplin.com>
Stefan S. <tronicum@user.github.com>
@@ -2003,6 +2037,7 @@ Stefan Scherer <stefan.scherer@docker.com>
Stefan Staudenmeyer <doerte@instana.com>
Stefan Weil <sw@weilnetz.de>
Steffen Butzer <steffen.butzer@outlook.com>
Stephan Henningsen <stephan-henningsen@users.noreply.github.com>
Stephan Spindler <shutefan@gmail.com>
Stephen Benjamin <stephen@redhat.com>
Stephen Crosby <stevecrozz@gmail.com>
@@ -2204,6 +2239,7 @@ Vinod Kulkarni <vinod.kulkarni@gmail.com>
Vishal Doshi <vishal.doshi@gmail.com>
Vishnu Kannan <vishnuk@google.com>
Vitaly Ostrosablin <vostrosablin@virtuozzo.com>
Vitor Anjos <bartier@users.noreply.github.com>
Vitor Monteiro <vmrmonteiro@gmail.com>
Vivek Agarwal <me@vivek.im>
Vivek Dasgupta <vdasgupt@redhat.com>
@@ -2250,6 +2286,7 @@ Wenxuan Zhao <viz@linux.com>
Wenyu You <21551128@zju.edu.cn>
Wenzhi Liang <wenzhi.liang@gmail.com>
Wes Morgan <cap10morgan@gmail.com>
Wesley Pettit <wppttt@amazon.com>
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
Wiktor Kwapisiewicz <wiktor@metacode.biz>
Will Dietz <w@wdtz.org>
@@ -2289,7 +2326,7 @@ xiekeyang <xiekeyang@huawei.com>
Ximo Guanter Gonzálbez <joaquin.guantergonzalbez@telefonica.com>
xin.li <xin.li@daocloud.io>
Xinbo Weng <xihuanbo_0521@zju.edu.cn>
Xinfeng Liu <xinfeng.liu@gmail.com>
Xinfeng Liu <XinfengLiu@icloud.com>
Xinzi Zhou <imdreamrunner@gmail.com>
Xiuming Chen <cc@cxm.cc>
Xuecong Liao <satorulogic@gmail.com>
@@ -2355,6 +2392,7 @@ Zen Lin(Zhinan Lin) <linzhinan@huawei.com>
Zhang Kun <zkazure@gmail.com>
Zhang Wei <zhangwei555@huawei.com>
Zhang Wentao <zhangwentao234@huawei.com>
zhangguanzhang <zhangguanzhang@qq.com>
ZhangHang <stevezhang2014@gmail.com>
zhangxianwei <xianwei.zw@alibaba-inc.com>
Zhenan Ye <21551168@zju.edu.cn>
@@ -2381,6 +2419,7 @@ Zuhayr Elahi <zuhayr.elahi@docker.com>
Zunayed Ali <zunayed@gmail.com>
Álvaro Lázaro <alvaro.lazaro.g@gmail.com>
Átila Camurça Alves <camurca.home@gmail.com>
吴小白 <296015668@qq.com>
尹吉峰 <jifeng.yin@gmail.com>
屈骏 <qujun@tiduyun.com>
徐俊杰 <paco.xu@daocloud.io>
2 vendor/github.com/docker/docker/api/README.md generated vendored
@@ -37,6 +37,6 @@ There is hopefully enough example material in the file for you to copy a similar

 When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.

-Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.
+Run `make swagger-docs` and a preview will be running at `http://localhost:9000`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation.

 The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io).
2 vendor/github.com/docker/docker/api/common.go generated vendored
@@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api"

 // Common constants for daemon and client.
 const (
 	// DefaultVersion of Current REST API
-	DefaultVersion = "1.43"
+	DefaultVersion = "1.44"

 	// NoBaseImageSpecifier is the symbol used by the FROM
 	// command to specify that no base image is to be used.
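Context for the bump (not in the diff): clients built against this vendored API default to 1.44 but can negotiate down against older daemons. A hedged sketch using the standard Docker client options:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Negotiation happens on the first call to the daemon, e.g. Ping.
	if _, err := cli.Ping(context.Background()); err != nil {
		panic(err)
	}
	fmt.Println("library default:", api.DefaultVersion) // 1.44 after this bump
	fmt.Println("negotiated:     ", cli.ClientVersion())
}
```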
7 vendor/github.com/docker/docker/api/common_unix.go generated vendored
@@ -1,7 +0,0 @@
//go:build !windows
// +build !windows

package api // import "github.com/docker/docker/api"

// MinVersion represents Minimum REST API version supported
const MinVersion = "1.12"
8 vendor/github.com/docker/docker/api/common_windows.go generated vendored
@@ -1,8 +0,0 @@
package api // import "github.com/docker/docker/api"

// MinVersion represents Minimum REST API version supported
// Technically the first daemon API version released on Windows is v1.25 in
// engine version 1.13. However, some clients are explicitly using downlevel
// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive.
// Hence also allowing 1.24 on Windows.
const MinVersion string = "1.24"
Some files were not shown because too many files have changed in this diff.