mirror of
https://github.com/linuxkit/linuxkit.git
synced 2025-10-22 22:48:26 +00:00
Bump buildkit (#4128)
* bump buildkit to v0.23.1 Signed-off-by: Avi Deitcher <avi@deitcher.net> * bump buldkit library and deps to v0.23.1 Signed-off-by: Avi Deitcher <avi@deitcher.net> --------- Signed-off-by: Avi Deitcher <avi@deitcher.net>
This commit is contained in:
@@ -13,22 +13,22 @@ require (
|
||||
github.com/Microsoft/go-winio v0.6.2
|
||||
github.com/ScaleFT/sshkeys v0.0.0-20181112160850-82451a803681
|
||||
github.com/aws/aws-sdk-go v1.44.82
|
||||
github.com/docker/cli v28.0.0-rc.2+incompatible
|
||||
github.com/docker/docker v28.0.0-rc.2+incompatible
|
||||
github.com/docker/cli v28.2.2+incompatible
|
||||
github.com/docker/docker v28.2.2+incompatible
|
||||
github.com/docker/go-units v0.5.0
|
||||
github.com/google/go-containerregistry v0.20.3
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/gophercloud/gophercloud v0.1.0
|
||||
github.com/gophercloud/utils v0.0.0-20181029231510-34f5991525d1
|
||||
github.com/klauspost/pgzip v1.2.5
|
||||
github.com/moby/buildkit v0.20.0
|
||||
github.com/moby/buildkit v0.23.1
|
||||
github.com/moby/hyperkit v0.0.0-20180416161519-d65b09c1c28a
|
||||
//github.com/moby/moby v20.10.3-0.20220728162118-71cb54cec41e+incompatible // indirect
|
||||
github.com/moby/term v0.5.2
|
||||
github.com/moby/vpnkit v0.4.1-0.20200311130018-2ffc1dd8a84e
|
||||
github.com/moul/gotty-client v1.7.1-0.20180526075433-e5589f6df359
|
||||
github.com/opencontainers/image-spec v1.1.0
|
||||
github.com/opencontainers/runtime-spec v1.2.0
|
||||
github.com/opencontainers/image-spec v1.1.1
|
||||
github.com/opencontainers/runtime-spec v1.2.1
|
||||
github.com/pkg/term v1.1.0
|
||||
github.com/radu-matei/azure-sdk-for-go v5.0.0-beta.0.20161118192335-3b1282355199+incompatible
|
||||
github.com/radu-matei/azure-vhd-utils v0.0.0-20170531165126-e52754d5569d
|
||||
@@ -39,33 +39,32 @@ require (
|
||||
github.com/surma/gocpio v1.0.2-0.20160926205914-fcb68777e7dc
|
||||
github.com/vmware/govmomi v0.20.3
|
||||
github.com/xeipuuv/gojsonschema v1.2.0
|
||||
golang.org/x/crypto v0.31.0
|
||||
golang.org/x/net v0.33.0
|
||||
golang.org/x/oauth2 v0.25.0
|
||||
golang.org/x/sync v0.10.0
|
||||
golang.org/x/sys v0.29.0
|
||||
golang.org/x/term v0.27.0
|
||||
golang.org/x/crypto v0.37.0
|
||||
golang.org/x/net v0.39.0
|
||||
golang.org/x/oauth2 v0.27.0
|
||||
golang.org/x/sync v0.14.0
|
||||
golang.org/x/sys v0.33.0
|
||||
golang.org/x/term v0.31.0
|
||||
google.golang.org/api v0.149.0
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/Code-Hex/vz/v3 v3.0.0
|
||||
github.com/containerd/containerd/v2 v2.0.2
|
||||
github.com/containerd/containerd/v2 v2.1.3
|
||||
github.com/containerd/platforms v1.0.0-rc.1
|
||||
github.com/docker/buildx v0.21.1
|
||||
github.com/equinix/equinix-sdk-go v0.42.0
|
||||
github.com/hashicorp/go-version v1.7.0
|
||||
github.com/in-toto/in-toto-golang v0.5.0
|
||||
github.com/in-toto/in-toto-golang v0.9.0
|
||||
github.com/moby/sys/capability v0.3.0
|
||||
github.com/spdx/tools-golang v0.5.3
|
||||
github.com/spdx/tools-golang v0.5.5
|
||||
github.com/spf13/cobra v1.8.1
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/compute/metadata v0.6.0 // indirect
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect
|
||||
github.com/Azure/go-autorest v14.2.1-0.20210115164004-c0fe8b0fea3d+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
|
||||
@@ -73,8 +72,8 @@ require (
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/agext/levenshtein v1.2.3 // indirect
|
||||
github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect
|
||||
github.com/containerd/console v1.0.4 // indirect
|
||||
github.com/containerd/containerd/api v1.8.0 // indirect
|
||||
github.com/containerd/console v1.0.5 // indirect
|
||||
github.com/containerd/containerd/api v1.9.0 // indirect
|
||||
github.com/containerd/continuity v0.4.5 // indirect
|
||||
github.com/containerd/errdefs v1.0.0 // indirect
|
||||
github.com/containerd/errdefs/pkg v0.3.0 // indirect
|
||||
@@ -87,7 +86,7 @@ require (
|
||||
github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a // indirect
|
||||
github.com/distribution/reference v0.6.0 // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.8.2 // indirect
|
||||
github.com/docker/docker-credential-helpers v0.9.3 // indirect
|
||||
github.com/docker/go-connections v0.5.0 // indirect
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/go-logr/logr v1.4.2 // indirect
|
||||
@@ -97,19 +96,19 @@ require (
|
||||
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.4 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/google/s2a-go v0.1.7 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/klauspost/compress v1.18.0 // indirect
|
||||
github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3 // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b // indirect
|
||||
@@ -123,33 +122,33 @@ require (
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.6.0 // indirect
|
||||
github.com/shibumi/go-pathspec v1.3.0 // indirect
|
||||
github.com/smartystreets/goconvey v1.8.1 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a // indirect
|
||||
github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 // indirect
|
||||
github.com/tonistiigi/fsutil v0.0.0-20250605211040-586307ad452f // indirect
|
||||
github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0 // indirect
|
||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
|
||||
github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab // indirect
|
||||
github.com/vbatts/tar-split v0.11.6 // indirect
|
||||
github.com/vbatts/tar-split v0.12.1 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
|
||||
go.opentelemetry.io/otel v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.33.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.33.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
||||
golang.org/x/mod v0.22.0 // indirect
|
||||
golang.org/x/text v0.21.0 // indirect
|
||||
golang.org/x/time v0.6.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect
|
||||
google.golang.org/grpc v1.69.4 // indirect
|
||||
google.golang.org/protobuf v1.36.3 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
|
||||
go.opentelemetry.io/otel v1.35.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.35.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.35.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.35.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
|
||||
golang.org/x/mod v0.24.0 // indirect
|
||||
golang.org/x/text v0.24.0 // indirect
|
||||
golang.org/x/time v0.11.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect
|
||||
google.golang.org/grpc v1.72.2 // indirect
|
||||
google.golang.org/protobuf v1.36.6 // indirect
|
||||
)
|
||||
|
@@ -3,8 +3,6 @@ cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4
|
||||
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
|
||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2 h1:dIScnXFlF784X79oi7MzVT6GWqr/W1uUt0pB5CsDs9M=
|
||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2/go.mod h1:gCLVsLfv1egrcZu+GoJATN5ts75F2s62ih/457eWzOw=
|
||||
github.com/Azure/azure-sdk-for-go v56.3.0+incompatible h1:DmhwMrUIvpeoTDiWRDtNHqelNUd3Og8JCkrLHQK795c=
|
||||
github.com/Azure/azure-sdk-for-go v56.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
|
||||
@@ -33,8 +31,8 @@ github.com/Code-Hex/vz/v3 v3.0.0 h1:UuYAZz8NF8n+R4MLfn/lpUlepgzZ8BlhKtioQ+q9LXk=
|
||||
github.com/Code-Hex/vz/v3 v3.0.0/go.mod h1:+xPQOXVzNaZ4OeIIhlJFumtbFhsvRfqKwX7wuZS4dFA=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg=
|
||||
github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y=
|
||||
github.com/Microsoft/hcsshim v0.13.0 h1:/BcXOiS6Qi7N9XqUcv27vkIuVOkBEcWstd2pMlWSeaA=
|
||||
github.com/Microsoft/hcsshim v0.13.0/go.mod h1:9KWJ/8DgU+QzYGupX4tzMhRQE8h6w90lH6HAaclpEok=
|
||||
github.com/ScaleFT/sshkeys v0.0.0-20181112160850-82451a803681 h1:JS2rl38kZmHgWa0xINSaSYH0Whtvem64/4+Ef0+Y5pE=
|
||||
github.com/ScaleFT/sshkeys v0.0.0-20181112160850-82451a803681/go.mod h1:WfDateMPQ/55dPbZRp5Zxrux5WiEaHsjk9puUhz0KgY=
|
||||
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
|
||||
@@ -52,12 +50,12 @@ github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUo
|
||||
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
|
||||
github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo=
|
||||
github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
|
||||
github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
|
||||
github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
|
||||
github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0=
|
||||
github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc=
|
||||
github.com/containerd/containerd/v2 v2.0.2 h1:GmH/tRBlTvrXOLwSpWE2vNAm8+MqI6nmxKpKBNKY8Wc=
|
||||
github.com/containerd/containerd/v2 v2.0.2/go.mod h1:wIqEvQ/6cyPFUGJ5yMFanspPabMLor+bF865OHvNTTI=
|
||||
github.com/containerd/console v1.0.5 h1:R0ymNeydRqH2DmakFNdmjR2k0t7UPuiOV/N/27/qqsc=
|
||||
github.com/containerd/console v1.0.5/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
|
||||
github.com/containerd/containerd/api v1.9.0 h1:HZ/licowTRazus+wt9fM6r/9BQO7S0vD5lMcWspGIg0=
|
||||
github.com/containerd/containerd/api v1.9.0/go.mod h1:GhghKFmTR3hNtyznBoQ0EMWr9ju5AqHjcZPsSpTKutI=
|
||||
github.com/containerd/containerd/v2 v2.1.3 h1:eMD2SLcIQPdMlnlNF6fatlrlRLAeDaiGPGwmRKLZKNs=
|
||||
github.com/containerd/containerd/v2 v2.1.3/go.mod h1:8C5QV9djwsYDNhxfTCFjWtTBZrqjditQ4/ghHSYjnHM=
|
||||
github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4=
|
||||
github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
|
||||
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
|
||||
@@ -68,8 +66,8 @@ github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY
|
||||
github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
|
||||
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
|
||||
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
|
||||
github.com/containerd/nydus-snapshotter v0.15.0 h1:RqZRs1GPeM6T3wmuxJV9u+2Rg4YETVMwTmiDeX+iWC8=
|
||||
github.com/containerd/nydus-snapshotter v0.15.0/go.mod h1:biq0ijpeZe0I5yZFSJyHzFSjjRZQ7P7y/OuHyd7hYOw=
|
||||
github.com/containerd/nydus-snapshotter v0.15.2 h1:qsHI4M+Wwrf6Jr4eBqhNx8qh+YU0dSiJ+WPmcLFWNcg=
|
||||
github.com/containerd/nydus-snapshotter v0.15.2/go.mod h1:FfwH2KBkNYoisK/e+KsmNr7xTU53DmnavQHMFOcXwfM=
|
||||
github.com/containerd/platforms v1.0.0-rc.1 h1:83KIq4yy1erSRgOVHNk1HYdPvzdJ5CnsWaRoJX4C41E=
|
||||
github.com/containerd/platforms v1.0.0-rc.1/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4=
|
||||
github.com/containerd/plugin v1.0.0 h1:c8Kf1TNl6+e2TtMHZt+39yAPDbouRH9WAToRjex483Y=
|
||||
@@ -96,14 +94,14 @@ github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
|
||||
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
|
||||
github.com/docker/buildx v0.21.1 h1:YjV2k6CsSDbkDTOMsjARUIrj2xv+zZR+M2dtrRyzXhg=
|
||||
github.com/docker/buildx v0.21.1/go.mod h1:8V4UMnlKsaGYwz83BygmIbJIFEAYGHT6KAv8akDZmqo=
|
||||
github.com/docker/cli v28.0.0-rc.2+incompatible h1:2N1dpr3qtlJwIQpqXm7oNwWNAUGzpKlsCeJ32ejvpTk=
|
||||
github.com/docker/cli v28.0.0-rc.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli v28.2.2+incompatible h1:qzx5BNUDFqlvyq4AHzdNB7gSyVTmU4cgsyN9SdInc1A=
|
||||
github.com/docker/cli v28.2.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
|
||||
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v28.0.0-rc.2+incompatible h1:p+Ri+C0mmbPkhYVD9Sxnp/TnNnZoQWEj/EwOC465Uq4=
|
||||
github.com/docker/docker v28.0.0-rc.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
|
||||
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
|
||||
github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw=
|
||||
github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
|
||||
github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo=
|
||||
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
|
||||
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
|
||||
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
|
||||
@@ -116,8 +114,6 @@ github.com/equinix/equinix-sdk-go v0.42.0 h1:jjgdFs0rx6nOwsu/dLh6ImopD0M1Rn7QIn9
|
||||
github.com/equinix/equinix-sdk-go v0.42.0/go.mod h1:hEb3XLaedz7xhl/dpPIS6eOIiXNPeqNiVoyDrT6paIg=
|
||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
|
||||
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
|
||||
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
@@ -154,8 +150,9 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI=
|
||||
github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI=
|
||||
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
|
||||
@@ -177,8 +174,8 @@ github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25d
|
||||
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M=
|
||||
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
@@ -188,8 +185,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
|
||||
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY=
|
||||
github.com/in-toto/in-toto-golang v0.5.0/go.mod h1:/Rq0IZHLV7Ku5gielPT4wPHJfH1GdHMCq8+WPxw8/BE=
|
||||
github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU=
|
||||
github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
@@ -200,8 +197,8 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
|
||||
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
@@ -216,8 +213,8 @@ github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b h1:9+ke9YJ9KGWw5AN
|
||||
github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
|
||||
github.com/moby/buildkit v0.20.0 h1:aF5RujjQ310Pn6SLL/wQYIrSsPXy0sQ5KvWifwq1h8Y=
|
||||
github.com/moby/buildkit v0.20.0/go.mod h1:HYFUIK+iGDRxRgdphZ9Nv0y1Fz7mv0HrU7xZoXx217E=
|
||||
github.com/moby/buildkit v0.23.1 h1:CZtFmPRF+IFG1C8QfPnktGO1Dzzt5JSwtQ5eDqIh+ag=
|
||||
github.com/moby/buildkit v0.23.1/go.mod h1:keNXljNmKX1T0AtM0bMObc8OV6mA9cOuquVbPcRpU/Y=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/hyperkit v0.0.0-20180416161519-d65b09c1c28a h1:hExo7kltIidoitYnXsnqfvkJXG3YEMbHuQbf9nOMvow=
|
||||
@@ -226,6 +223,8 @@ github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
|
||||
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
|
||||
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
|
||||
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
|
||||
github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
|
||||
github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
|
||||
github.com/moby/sys/capability v0.3.0 h1:kEP+y6te0gEXIaeQhIi0s7vKs/w0RPoH1qPa6jROcVg=
|
||||
github.com/moby/sys/capability v0.3.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I=
|
||||
github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg=
|
||||
@@ -234,8 +233,8 @@ github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7z
|
||||
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
|
||||
github.com/moby/sys/signal v0.7.1 h1:PrQxdvxcGijdo6UXXo/lU/TvHUWyPhj7UOpSo8tuvk0=
|
||||
github.com/moby/sys/signal v0.7.1/go.mod h1:Se1VGehYokAkrSQwL4tDzHvETwUZlnY7S5XtQ50mQp8=
|
||||
github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
|
||||
github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
|
||||
github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
|
||||
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
|
||||
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
|
||||
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
|
||||
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
|
||||
@@ -248,14 +247,12 @@ github.com/moul/gotty-client v1.7.1-0.20180526075433-e5589f6df359 h1:gIpgn2fMLvl
|
||||
github.com/moul/gotty-client v1.7.1-0.20180526075433-e5589f6df359/go.mod h1:CxM/JGtpRrEPve5H04IhxJrGhxgwxMc6jSP2T4YD60w=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
|
||||
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
|
||||
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
|
||||
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 h1:DmNGcqH3WDbV5k8OJ+esPWbqUOX5rMLR2PMvziDMJi0=
|
||||
github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI=
|
||||
github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8=
|
||||
github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
|
||||
github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
|
||||
github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
|
||||
github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww=
|
||||
github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/selinux v1.12.0 h1:6n5JV4Cf+4y0KNXW48TLj5DwfXpvWlxXplUkdTrmPb8=
|
||||
github.com/opencontainers/selinux v1.12.0/go.mod h1:BTPX+bjVbWGXw7ZZWUbdENt8w0htPSrlgOOysQaU62U=
|
||||
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
|
||||
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
@@ -280,8 +277,8 @@ github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWN
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.6 h1:C1/pvkxkGN/H03mDxLzItaceYJDBk1HdClgR15suAzI=
|
||||
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.6/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.6.0 h1:T65atpAVCJQK14UA57LMdZGpHi4QYSH/9FZyNGqMYIA=
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.6.0/go.mod h1:8Mtpo9JKks/qhPG4HGZ2LGMvrPbzuxwfz/f/zLfEWkk=
|
||||
github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI=
|
||||
github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
@@ -291,8 +288,8 @@ github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+
|
||||
github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY=
|
||||
github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60=
|
||||
github.com/spdx/gordf v0.0.0-20201111095634-7098f93598fb/go.mod h1:uKWaldnbMnjsSAXRurWqqrdyZen1R7kxl8TkmWk2OyM=
|
||||
github.com/spdx/tools-golang v0.5.3 h1:ialnHeEYUC4+hkm5vJm4qz2x+oEJbS0mAMFrNXdQraY=
|
||||
github.com/spdx/tools-golang v0.5.3/go.mod h1:/ETOahiAo96Ob0/RAIBmFZw6XN0yTnyr/uFZm2NTMhI=
|
||||
github.com/spdx/tools-golang v0.5.5 h1:61c0KLfAcNqAjlg6UNMdkwpMernhw3zVRwDZ2x9XOmk=
|
||||
github.com/spdx/tools-golang v0.5.5/go.mod h1:MVIsXx8ZZzaRWNQpUDhC4Dud34edUYJYecciXgrw5vE=
|
||||
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
|
||||
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
@@ -300,28 +297,28 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/surma/gocpio v1.0.2-0.20160926205914-fcb68777e7dc h1:iA3Eg1OVd2o0M4M+0PBsBBssMz98L8CUH7x0xVkuyUA=
|
||||
github.com/surma/gocpio v1.0.2-0.20160926205914-fcb68777e7dc/go.mod h1:zaLNaN+EDnfSnNdWPJJf9OZxWF817w5dt8JNzF9LCVI=
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a h1:EfGw4G0x/8qXWgtcZ6KVaPS+wpWOQMaypczzP8ojkMY=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a/go.mod h1:Dl/9oEjK7IqnjAm21Okx/XIxUCFJzvh+XdVHUlBwXTw=
|
||||
github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 h1:7I5c2Ig/5FgqkYOh/N87NzoyI9U15qUPXhDD8uCupv8=
|
||||
github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4/go.mod h1:278M4p8WsNh3n4a1eqiFcV2FGk7wE5fwUpUom9mK9lE=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20250605211040-586307ad452f h1:MoxeMfHAe5Qj/ySSBfL8A7l1V+hxuluj8owsIEEZipI=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20250605211040-586307ad452f/go.mod h1:BKdcez7BiVtBvIcef90ZPc6ebqIWr4JWD7+EvLm6J98=
|
||||
github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0 h1:2f304B10LaZdB8kkVEaoXvAMVan2tl9AiK4G0odjQtE=
|
||||
github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0/go.mod h1:278M4p8WsNh3n4a1eqiFcV2FGk7wE5fwUpUom9mK9lE=
|
||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0=
|
||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk=
|
||||
github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab h1:H6aJ0yKQ0gF49Qb2z5hI1UHxSQt4JMyxebFR15KnApw=
|
||||
github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc=
|
||||
github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs=
|
||||
github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI=
|
||||
github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo=
|
||||
github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
|
||||
github.com/vmware/govmomi v0.20.3 h1:gpw/0Ku+6RgF3jsi7fnCLmlcikBHfKBCUcu1qgc16OU=
|
||||
github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
@@ -337,44 +334,46 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
|
||||
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 h1:4BZHA+B1wXEQoGNHxW8mURaLhcdGwvRnmhGbm+odRbc=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
|
||||
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
|
||||
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0=
|
||||
go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
|
||||
go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
|
||||
go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
|
||||
go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8=
|
||||
go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
|
||||
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
|
||||
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
|
||||
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
|
||||
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk=
|
||||
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
|
||||
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
|
||||
go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
|
||||
go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
|
||||
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
|
||||
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
|
||||
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
|
||||
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
|
||||
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
|
||||
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
|
||||
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
|
||||
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -387,18 +386,18 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
|
||||
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
|
||||
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
|
||||
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
|
||||
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
|
||||
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
|
||||
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
|
||||
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ=
|
||||
golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -412,20 +411,20 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
|
||||
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
|
||||
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
|
||||
golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
|
||||
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
|
||||
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
|
||||
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
|
||||
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
@@ -445,17 +444,17 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 h1:2oV8dfuIkM1Ti7DwXc0BJfnwr9csz4TDXI9EmiI+Rbw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38/go.mod h1:vuAjtvlwkDKF6L1GQ0SokiRLCGFfeBUXWr/aFFkHACc=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
|
||||
google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
|
||||
google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8=
|
||||
google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
@@ -465,8 +464,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU=
|
||||
google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
|
||||
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
@@ -481,10 +480,4 @@ gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
|
||||
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
|
||||
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
|
||||
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
|
||||
tags.cncf.io/container-device-interface v0.8.0 h1:8bCFo/g9WODjWx3m6EYl3GfUG31eKJbaggyBDxEldRc=
|
||||
tags.cncf.io/container-device-interface v0.8.0/go.mod h1:Apb7N4VdILW0EVdEMRYXIDVRZfNJZ+kmEUss2kRRQ6Y=
|
||||
tags.cncf.io/container-device-interface/specs-go v0.8.0 h1:QYGFzGxvYK/ZLMrjhvY0RjpUavIn4KcmRmVP/JjdBTA=
|
||||
tags.cncf.io/container-device-interface/specs-go v0.8.0/go.mod h1:BhJIkjjPh4qpys+qm4DAYtUyryaTDg9zris+AczXyws=
|
||||
|
@@ -15,7 +15,7 @@ import (
|
||||
const (
|
||||
buildersEnvVar = "LINUXKIT_BUILDERS"
|
||||
envVarCacheDir = "LINUXKIT_CACHE"
|
||||
defaultBuilderImage = "moby/buildkit:v0.20.0"
|
||||
defaultBuilderImage = "moby/buildkit:v0.23.1"
|
||||
)
|
||||
|
||||
// some logic clarification:
|
||||
|
201
src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/LICENSE
generated
vendored
201
src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/LICENSE
generated
vendored
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
93
src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/README.md
generated
vendored
93
src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/README.md
generated
vendored
@@ -1,93 +0,0 @@
|
||||
# go-fuzz-headers
|
||||
This repository contains various helper functions for go fuzzing. It is mostly used in combination with [go-fuzz](https://github.com/dvyukov/go-fuzz), but compatibility with fuzzing in the standard library will also be supported. Any coverage guided fuzzing engine that provides an array or slice of bytes can be used with go-fuzz-headers.
|
||||
|
||||
|
||||
## Usage
|
||||
Using go-fuzz-headers is easy. First create a new consumer with the bytes provided by the fuzzing engine:
|
||||
|
||||
```go
|
||||
import (
|
||||
fuzz "github.com/AdaLogics/go-fuzz-headers"
|
||||
)
|
||||
data := []byte{'R', 'a', 'n', 'd', 'o', 'm'}
|
||||
f := fuzz.NewConsumer(data)
|
||||
|
||||
```
|
||||
|
||||
This creates a `Consumer` that consumes the bytes of the input as it uses them to fuzz different types.
|
||||
|
||||
After that, `f` can be used to easily create fuzzed instances of different types. Below are some examples:
|
||||
|
||||
### Structs
|
||||
One of the most useful features of go-fuzz-headers is its ability to fill structs with the data provided by the fuzzing engine. This is done with a single line:
|
||||
```go
|
||||
type Person struct {
|
||||
Name string
|
||||
Age int
|
||||
}
|
||||
p := Person{}
|
||||
// Fill p with values based on the data provided by the fuzzing engine:
|
||||
err := f.GenerateStruct(&p)
|
||||
```
|
||||
|
||||
This includes nested structs too. In this example, the fuzz Consumer will also insert values in `p.BestFriend`:
|
||||
```go
|
||||
type PersonI struct {
|
||||
Name string
|
||||
Age int
|
||||
BestFriend PersonII
|
||||
}
|
||||
type PersonII struct {
|
||||
Name string
|
||||
Age int
|
||||
}
|
||||
p := PersonI{}
|
||||
err := f.GenerateStruct(&p)
|
||||
```
|
||||
|
||||
If the consumer should insert values for unexported fields as well as exported, this can be enabled with:
|
||||
|
||||
```go
|
||||
f.AllowUnexportedFields()
|
||||
```
|
||||
|
||||
...and disabled with:
|
||||
|
||||
```go
|
||||
f.DisallowUnexportedFields()
|
||||
```
|
||||
|
||||
### Other types:
|
||||
|
||||
Other useful APIs:
|
||||
|
||||
```go
|
||||
createdString, err := f.GetString() // Gets a string
|
||||
createdInt, err := f.GetInt() // Gets an integer
|
||||
createdByte, err := f.GetByte() // Gets a byte
|
||||
createdBytes, err := f.GetBytes() // Gets a byte slice
|
||||
createdBool, err := f.GetBool() // Gets a boolean
|
||||
err := f.FuzzMap(target_map) // Fills a map
|
||||
createdTarBytes, err := f.TarBytes() // Gets bytes of a valid tar archive
|
||||
err := f.CreateFiles(inThisDir) // Fills inThisDir with files
|
||||
createdString, err := f.GetStringFrom("anyCharInThisString", ofThisLength) // Gets a string that consists of chars from "anyCharInThisString" and has the exact length "ofThisLength"
|
||||
```
|
||||
|
||||
Most APIs are added as they are needed.
|
||||
|
||||
## Projects that use go-fuzz-headers
|
||||
- [runC](https://github.com/opencontainers/runc)
|
||||
- [Istio](https://github.com/istio/istio)
|
||||
- [Vitess](https://github.com/vitessio/vitess)
|
||||
- [Containerd](https://github.com/containerd/containerd)
|
||||
|
||||
Feel free to add your own project to the list, if you use go-fuzz-headers to fuzz it.
|
||||
|
||||
|
||||
|
||||
|
||||
## Status
|
||||
The project is under development and will be updated regularly.
|
||||
|
||||
## References
|
||||
go-fuzz-headers' approach to fuzzing structs is strongly inspired by [gofuzz](https://github.com/google/gofuzz).
|
960
src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go
generated
vendored
960
src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go
generated
vendored
@@ -1,960 +0,0 @@
|
||||
// Copyright 2023 The go-fuzz-headers Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gofuzzheaders
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var (
|
||||
MaxTotalLen uint32 = 2000000
|
||||
maxDepth = 100
|
||||
)
|
||||
|
||||
func SetMaxTotalLen(newLen uint32) {
|
||||
MaxTotalLen = newLen
|
||||
}
|
||||
|
||||
type ConsumeFuzzer struct {
|
||||
data []byte
|
||||
dataTotal uint32
|
||||
CommandPart []byte
|
||||
RestOfArray []byte
|
||||
NumberOfCalls int
|
||||
position uint32
|
||||
fuzzUnexportedFields bool
|
||||
forceUTF8Strings bool
|
||||
curDepth int
|
||||
Funcs map[reflect.Type]reflect.Value
|
||||
}
|
||||
|
||||
func IsDivisibleBy(n int, divisibleby int) bool {
|
||||
return (n % divisibleby) == 0
|
||||
}
|
||||
|
||||
func NewConsumer(fuzzData []byte) *ConsumeFuzzer {
|
||||
return &ConsumeFuzzer{
|
||||
data: fuzzData,
|
||||
dataTotal: uint32(len(fuzzData)),
|
||||
Funcs: make(map[reflect.Type]reflect.Value),
|
||||
curDepth: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) Split(minCalls, maxCalls int) error {
|
||||
if f.dataTotal == 0 {
|
||||
return errors.New("could not split")
|
||||
}
|
||||
numberOfCalls := int(f.data[0])
|
||||
if numberOfCalls < minCalls || numberOfCalls > maxCalls {
|
||||
return errors.New("bad number of calls")
|
||||
}
|
||||
if int(f.dataTotal) < numberOfCalls+numberOfCalls+1 {
|
||||
return errors.New("length of data does not match required parameters")
|
||||
}
|
||||
|
||||
// Define part 2 and 3 of the data array
|
||||
commandPart := f.data[1 : numberOfCalls+1]
|
||||
restOfArray := f.data[numberOfCalls+1:]
|
||||
|
||||
// Just a small check. It is necessary
|
||||
if len(commandPart) != numberOfCalls {
|
||||
return errors.New("length of commandPart does not match number of calls")
|
||||
}
|
||||
|
||||
// Check if restOfArray is divisible by numberOfCalls
|
||||
if !IsDivisibleBy(len(restOfArray), numberOfCalls) {
|
||||
return errors.New("length of commandPart does not match number of calls")
|
||||
}
|
||||
f.CommandPart = commandPart
|
||||
f.RestOfArray = restOfArray
|
||||
f.NumberOfCalls = numberOfCalls
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) AllowUnexportedFields() {
|
||||
f.fuzzUnexportedFields = true
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) DisallowUnexportedFields() {
|
||||
f.fuzzUnexportedFields = false
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) AllowNonUTF8Strings() {
|
||||
f.forceUTF8Strings = false
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) DisallowNonUTF8Strings() {
|
||||
f.forceUTF8Strings = true
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GenerateStruct(targetStruct interface{}) error {
|
||||
e := reflect.ValueOf(targetStruct).Elem()
|
||||
return f.fuzzStruct(e, false)
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) setCustom(v reflect.Value) error {
|
||||
// First: see if we have a fuzz function for it.
|
||||
doCustom, ok := f.Funcs[v.Type()]
|
||||
if !ok {
|
||||
return fmt.Errorf("could not find a custom function")
|
||||
}
|
||||
|
||||
switch v.Kind() {
|
||||
case reflect.Ptr:
|
||||
if v.IsNil() {
|
||||
if !v.CanSet() {
|
||||
return fmt.Errorf("could not use a custom function")
|
||||
}
|
||||
v.Set(reflect.New(v.Type().Elem()))
|
||||
}
|
||||
case reflect.Map:
|
||||
if v.IsNil() {
|
||||
if !v.CanSet() {
|
||||
return fmt.Errorf("could not use a custom function")
|
||||
}
|
||||
v.Set(reflect.MakeMap(v.Type()))
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("could not use a custom function")
|
||||
}
|
||||
|
||||
verr := doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
|
||||
F: f,
|
||||
})})
|
||||
|
||||
// check if we return an error
|
||||
if verr[0].IsNil() {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("could not use a custom function")
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) fuzzStruct(e reflect.Value, customFunctions bool) error {
|
||||
if f.curDepth >= maxDepth {
|
||||
// return err or nil here?
|
||||
return nil
|
||||
}
|
||||
f.curDepth++
|
||||
defer func() { f.curDepth-- }()
|
||||
|
||||
// We check if we should check for custom functions
|
||||
if customFunctions && e.IsValid() && e.CanAddr() {
|
||||
err := f.setCustom(e.Addr())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
switch e.Kind() {
|
||||
case reflect.Struct:
|
||||
for i := 0; i < e.NumField(); i++ {
|
||||
var v reflect.Value
|
||||
if !e.Field(i).CanSet() {
|
||||
if f.fuzzUnexportedFields {
|
||||
v = reflect.NewAt(e.Field(i).Type(), unsafe.Pointer(e.Field(i).UnsafeAddr())).Elem()
|
||||
}
|
||||
if err := f.fuzzStruct(v, customFunctions); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
v = e.Field(i)
|
||||
if err := f.fuzzStruct(v, customFunctions); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
case reflect.String:
|
||||
str, err := f.GetString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.SetString(str)
|
||||
}
|
||||
case reflect.Slice:
|
||||
var maxElements uint32
|
||||
// Byte slices should not be restricted
|
||||
if e.Type().String() == "[]uint8" {
|
||||
maxElements = 10000000
|
||||
} else {
|
||||
maxElements = 50
|
||||
}
|
||||
|
||||
randQty, err := f.GetUint32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
numOfElements := randQty % maxElements
|
||||
if (f.dataTotal - f.position) < numOfElements {
|
||||
numOfElements = f.dataTotal - f.position
|
||||
}
|
||||
|
||||
uu := reflect.MakeSlice(e.Type(), int(numOfElements), int(numOfElements))
|
||||
|
||||
for i := 0; i < int(numOfElements); i++ {
|
||||
// If we have more than 10, then we can proceed with that.
|
||||
if err := f.fuzzStruct(uu.Index(i), customFunctions); err != nil {
|
||||
if i >= 10 {
|
||||
if e.CanSet() {
|
||||
e.Set(uu)
|
||||
}
|
||||
return nil
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.Set(uu)
|
||||
}
|
||||
case reflect.Uint:
|
||||
newInt, err := f.GetUint()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.SetUint(uint64(newInt))
|
||||
}
|
||||
case reflect.Uint16:
|
||||
newInt, err := f.GetUint16()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.SetUint(uint64(newInt))
|
||||
}
|
||||
case reflect.Uint32:
|
||||
newInt, err := f.GetUint32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.SetUint(uint64(newInt))
|
||||
}
|
||||
case reflect.Uint64:
|
||||
newInt, err := f.GetInt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.SetUint(uint64(newInt))
|
||||
}
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
newInt, err := f.GetInt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.SetInt(int64(newInt))
|
||||
}
|
||||
case reflect.Float32:
|
||||
newFloat, err := f.GetFloat32()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.SetFloat(float64(newFloat))
|
||||
}
|
||||
case reflect.Float64:
|
||||
newFloat, err := f.GetFloat64()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.SetFloat(float64(newFloat))
|
||||
}
|
||||
case reflect.Map:
|
||||
if e.CanSet() {
|
||||
e.Set(reflect.MakeMap(e.Type()))
|
||||
const maxElements = 50
|
||||
randQty, err := f.GetInt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
numOfElements := randQty % maxElements
|
||||
for i := 0; i < numOfElements; i++ {
|
||||
key := reflect.New(e.Type().Key()).Elem()
|
||||
if err := f.fuzzStruct(key, customFunctions); err != nil {
|
||||
return err
|
||||
}
|
||||
val := reflect.New(e.Type().Elem()).Elem()
|
||||
if err = f.fuzzStruct(val, customFunctions); err != nil {
|
||||
return err
|
||||
}
|
||||
e.SetMapIndex(key, val)
|
||||
}
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if e.CanSet() {
|
||||
e.Set(reflect.New(e.Type().Elem()))
|
||||
if err := f.fuzzStruct(e.Elem(), customFunctions); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
case reflect.Uint8:
|
||||
b, err := f.GetByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.SetUint(uint64(b))
|
||||
}
|
||||
case reflect.Bool:
|
||||
b, err := f.GetBool()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if e.CanSet() {
|
||||
e.SetBool(b)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetStringArray() (reflect.Value, error) {
|
||||
// The max size of the array:
|
||||
const max uint32 = 20
|
||||
|
||||
arraySize := f.position
|
||||
if arraySize > max {
|
||||
arraySize = max
|
||||
}
|
||||
stringArray := reflect.MakeSlice(reflect.SliceOf(reflect.TypeOf("string")), int(arraySize), int(arraySize))
|
||||
if f.position+arraySize >= f.dataTotal {
|
||||
return stringArray, errors.New("could not make string array")
|
||||
}
|
||||
|
||||
for i := 0; i < int(arraySize); i++ {
|
||||
stringSize := uint32(f.data[f.position])
|
||||
if f.position+stringSize >= f.dataTotal {
|
||||
return stringArray, nil
|
||||
}
|
||||
stringToAppend := string(f.data[f.position : f.position+stringSize])
|
||||
strVal := reflect.ValueOf(stringToAppend)
|
||||
stringArray = reflect.Append(stringArray, strVal)
|
||||
f.position += stringSize
|
||||
}
|
||||
return stringArray, nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetInt() (int, error) {
|
||||
if f.position >= f.dataTotal {
|
||||
return 0, errors.New("not enough bytes to create int")
|
||||
}
|
||||
returnInt := int(f.data[f.position])
|
||||
f.position++
|
||||
return returnInt, nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetByte() (byte, error) {
|
||||
if f.position >= f.dataTotal {
|
||||
return 0x00, errors.New("not enough bytes to get byte")
|
||||
}
|
||||
returnByte := f.data[f.position]
|
||||
f.position++
|
||||
return returnByte, nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetNBytes(numberOfBytes int) ([]byte, error) {
|
||||
if f.position >= f.dataTotal {
|
||||
return nil, errors.New("not enough bytes to get byte")
|
||||
}
|
||||
returnBytes := make([]byte, 0, numberOfBytes)
|
||||
for i := 0; i < numberOfBytes; i++ {
|
||||
newByte, err := f.GetByte()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
returnBytes = append(returnBytes, newByte)
|
||||
}
|
||||
return returnBytes, nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetUint16() (uint16, error) {
|
||||
u16, err := f.GetNBytes(2)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
littleEndian, err := f.GetBool()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if littleEndian {
|
||||
return binary.LittleEndian.Uint16(u16), nil
|
||||
}
|
||||
return binary.BigEndian.Uint16(u16), nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetUint32() (uint32, error) {
|
||||
u32, err := f.GetNBytes(4)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return binary.BigEndian.Uint32(u32), nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetUint64() (uint64, error) {
|
||||
u64, err := f.GetNBytes(8)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
littleEndian, err := f.GetBool()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if littleEndian {
|
||||
return binary.LittleEndian.Uint64(u64), nil
|
||||
}
|
||||
return binary.BigEndian.Uint64(u64), nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetUint() (uint, error) {
|
||||
var zero uint
|
||||
size := int(unsafe.Sizeof(zero))
|
||||
if size == 8 {
|
||||
u64, err := f.GetUint64()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint(u64), nil
|
||||
}
|
||||
u32, err := f.GetUint32()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint(u32), nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetBytes() ([]byte, error) {
|
||||
var length uint32
|
||||
var err error
|
||||
length, err = f.GetUint32()
|
||||
if err != nil {
|
||||
return nil, errors.New("not enough bytes to create byte array")
|
||||
}
|
||||
|
||||
if length == 0 {
|
||||
length = 30
|
||||
}
|
||||
bytesLeft := f.dataTotal - f.position
|
||||
if bytesLeft <= 0 {
|
||||
return nil, errors.New("not enough bytes to create byte array")
|
||||
}
|
||||
|
||||
// If the length is the same as bytes left, we will not overflow
|
||||
// the remaining bytes.
|
||||
if length != bytesLeft {
|
||||
length = length % bytesLeft
|
||||
}
|
||||
byteBegin := f.position
|
||||
if byteBegin+length < byteBegin {
|
||||
return nil, errors.New("numbers overflow")
|
||||
}
|
||||
f.position = byteBegin + length
|
||||
return f.data[byteBegin:f.position], nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetString() (string, error) {
|
||||
if f.position >= f.dataTotal {
|
||||
return "nil", errors.New("not enough bytes to create string")
|
||||
}
|
||||
length, err := f.GetUint32()
|
||||
if err != nil {
|
||||
return "nil", errors.New("not enough bytes to create string")
|
||||
}
|
||||
if f.position > MaxTotalLen {
|
||||
return "nil", errors.New("created too large a string")
|
||||
}
|
||||
byteBegin := f.position
|
||||
if byteBegin >= f.dataTotal {
|
||||
return "nil", errors.New("not enough bytes to create string")
|
||||
}
|
||||
if byteBegin+length > f.dataTotal {
|
||||
return "nil", errors.New("not enough bytes to create string")
|
||||
}
|
||||
if byteBegin > byteBegin+length {
|
||||
return "nil", errors.New("numbers overflow")
|
||||
}
|
||||
f.position = byteBegin + length
|
||||
s := string(f.data[byteBegin:f.position])
|
||||
if f.forceUTF8Strings {
|
||||
s = strings.ToValidUTF8(s, "")
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetBool() (bool, error) {
|
||||
if f.position >= f.dataTotal {
|
||||
return false, errors.New("not enough bytes to create bool")
|
||||
}
|
||||
if IsDivisibleBy(int(f.data[f.position]), 2) {
|
||||
f.position++
|
||||
return true, nil
|
||||
} else {
|
||||
f.position++
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) FuzzMap(m interface{}) error {
|
||||
return f.GenerateStruct(m)
|
||||
}
|
||||
|
||||
func returnTarBytes(buf []byte) ([]byte, error) {
|
||||
return buf, nil
|
||||
// Count files
|
||||
var fileCounter int
|
||||
tr := tar.NewReader(bytes.NewReader(buf))
|
||||
for {
|
||||
_, err := tr.Next()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fileCounter++
|
||||
}
|
||||
if fileCounter >= 1 {
|
||||
return buf, nil
|
||||
}
|
||||
return nil, fmt.Errorf("not enough files were created\n")
|
||||
}
|
||||
|
||||
func setTarHeaderFormat(hdr *tar.Header, f *ConsumeFuzzer) error {
|
||||
ind, err := f.GetInt()
|
||||
if err != nil {
|
||||
hdr.Format = tar.FormatGNU
|
||||
//return nil
|
||||
}
|
||||
switch ind % 4 {
|
||||
case 0:
|
||||
hdr.Format = tar.FormatUnknown
|
||||
case 1:
|
||||
hdr.Format = tar.FormatUSTAR
|
||||
case 2:
|
||||
hdr.Format = tar.FormatPAX
|
||||
case 3:
|
||||
hdr.Format = tar.FormatGNU
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setTarHeaderTypeflag(hdr *tar.Header, f *ConsumeFuzzer) error {
|
||||
ind, err := f.GetInt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch ind % 13 {
|
||||
case 0:
|
||||
hdr.Typeflag = tar.TypeReg
|
||||
case 1:
|
||||
hdr.Typeflag = tar.TypeLink
|
||||
linkname, err := f.GetString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Linkname = linkname
|
||||
case 2:
|
||||
hdr.Typeflag = tar.TypeSymlink
|
||||
linkname, err := f.GetString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Linkname = linkname
|
||||
case 3:
|
||||
hdr.Typeflag = tar.TypeChar
|
||||
case 4:
|
||||
hdr.Typeflag = tar.TypeBlock
|
||||
case 5:
|
||||
hdr.Typeflag = tar.TypeDir
|
||||
case 6:
|
||||
hdr.Typeflag = tar.TypeFifo
|
||||
case 7:
|
||||
hdr.Typeflag = tar.TypeCont
|
||||
case 8:
|
||||
hdr.Typeflag = tar.TypeXHeader
|
||||
case 9:
|
||||
hdr.Typeflag = tar.TypeXGlobalHeader
|
||||
case 10:
|
||||
hdr.Typeflag = tar.TypeGNUSparse
|
||||
case 11:
|
||||
hdr.Typeflag = tar.TypeGNULongName
|
||||
case 12:
|
||||
hdr.Typeflag = tar.TypeGNULongLink
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) createTarFileBody() ([]byte, error) {
|
||||
return f.GetBytes()
|
||||
/*length, err := f.GetUint32()
|
||||
if err != nil {
|
||||
return nil, errors.New("not enough bytes to create byte array")
|
||||
}
|
||||
|
||||
// A bit of optimization to attempt to create a file body
|
||||
// when we don't have as many bytes left as "length"
|
||||
remainingBytes := f.dataTotal - f.position
|
||||
if remainingBytes <= 0 {
|
||||
return nil, errors.New("created too large a string")
|
||||
}
|
||||
if f.position+length > MaxTotalLen {
|
||||
return nil, errors.New("created too large a string")
|
||||
}
|
||||
byteBegin := f.position
|
||||
if byteBegin >= f.dataTotal {
|
||||
return nil, errors.New("not enough bytes to create byte array")
|
||||
}
|
||||
if length == 0 {
|
||||
return nil, errors.New("zero-length is not supported")
|
||||
}
|
||||
if byteBegin+length >= f.dataTotal {
|
||||
return nil, errors.New("not enough bytes to create byte array")
|
||||
}
|
||||
if byteBegin+length < byteBegin {
|
||||
return nil, errors.New("numbers overflow")
|
||||
}
|
||||
f.position = byteBegin + length
|
||||
return f.data[byteBegin:f.position], nil*/
|
||||
}
|
||||
|
||||
// getTarFileName is similar to GetString(), but creates string based
|
||||
// on the length of f.data to reduce the likelihood of overflowing
|
||||
// f.data.
|
||||
func (f *ConsumeFuzzer) getTarFilename() (string, error) {
|
||||
return f.GetString()
|
||||
/*length, err := f.GetUint32()
|
||||
if err != nil {
|
||||
return "nil", errors.New("not enough bytes to create string")
|
||||
}
|
||||
|
||||
// A bit of optimization to attempt to create a file name
|
||||
// when we don't have as many bytes left as "length"
|
||||
remainingBytes := f.dataTotal - f.position
|
||||
if remainingBytes <= 0 {
|
||||
return "nil", errors.New("created too large a string")
|
||||
}
|
||||
if f.position > MaxTotalLen {
|
||||
return "nil", errors.New("created too large a string")
|
||||
}
|
||||
byteBegin := f.position
|
||||
if byteBegin >= f.dataTotal {
|
||||
return "nil", errors.New("not enough bytes to create string")
|
||||
}
|
||||
if byteBegin+length > f.dataTotal {
|
||||
return "nil", errors.New("not enough bytes to create string")
|
||||
}
|
||||
if byteBegin > byteBegin+length {
|
||||
return "nil", errors.New("numbers overflow")
|
||||
}
|
||||
f.position = byteBegin + length
|
||||
return string(f.data[byteBegin:f.position]), nil*/
|
||||
}
|
||||
|
||||
type TarFile struct {
|
||||
Hdr *tar.Header
|
||||
Body []byte
|
||||
}
|
||||
|
||||
// TarBytes returns valid bytes for a tar archive
|
||||
func (f *ConsumeFuzzer) TarBytes() ([]byte, error) {
|
||||
numberOfFiles, err := f.GetInt()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var tarFiles []*TarFile
|
||||
tarFiles = make([]*TarFile, 0)
|
||||
|
||||
const maxNoOfFiles = 100
|
||||
for i := 0; i < numberOfFiles%maxNoOfFiles; i++ {
|
||||
var filename string
|
||||
var filebody []byte
|
||||
var sec, nsec int
|
||||
var err error
|
||||
|
||||
filename, err = f.getTarFilename()
|
||||
if err != nil {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("file-")
|
||||
sb.WriteString(strconv.Itoa(i))
|
||||
filename = sb.String()
|
||||
}
|
||||
filebody, err = f.createTarFileBody()
|
||||
if err != nil {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("filebody-")
|
||||
sb.WriteString(strconv.Itoa(i))
|
||||
filebody = []byte(sb.String())
|
||||
}
|
||||
|
||||
sec, err = f.GetInt()
|
||||
if err != nil {
|
||||
sec = 1672531200 // beginning of 2023
|
||||
}
|
||||
nsec, err = f.GetInt()
|
||||
if err != nil {
|
||||
nsec = 1703980800 // end of 2023
|
||||
}
|
||||
|
||||
hdr := &tar.Header{
|
||||
Name: filename,
|
||||
Size: int64(len(filebody)),
|
||||
Mode: 0o600,
|
||||
ModTime: time.Unix(int64(sec), int64(nsec)),
|
||||
}
|
||||
if err := setTarHeaderTypeflag(hdr, f); err != nil {
|
||||
return []byte(""), err
|
||||
}
|
||||
if err := setTarHeaderFormat(hdr, f); err != nil {
|
||||
return []byte(""), err
|
||||
}
|
||||
tf := &TarFile{
|
||||
Hdr: hdr,
|
||||
Body: filebody,
|
||||
}
|
||||
tarFiles = append(tarFiles, tf)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
tw := tar.NewWriter(&buf)
|
||||
defer tw.Close()
|
||||
|
||||
for _, tf := range tarFiles {
|
||||
tw.WriteHeader(tf.Hdr)
|
||||
tw.Write(tf.Body)
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// This is similar to TarBytes, but it returns a series of
|
||||
// files instead of raw tar bytes. The advantage of this
|
||||
// api is that it is cheaper in terms of cpu power to
|
||||
// modify or check the files in the fuzzer with TarFiles()
|
||||
// because it avoids creating a tar reader.
|
||||
func (f *ConsumeFuzzer) TarFiles() ([]*TarFile, error) {
|
||||
numberOfFiles, err := f.GetInt()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var tarFiles []*TarFile
|
||||
tarFiles = make([]*TarFile, 0)
|
||||
|
||||
const maxNoOfFiles = 100
|
||||
for i := 0; i < numberOfFiles%maxNoOfFiles; i++ {
|
||||
filename, err := f.getTarFilename()
|
||||
if err != nil {
|
||||
return tarFiles, err
|
||||
}
|
||||
filebody, err := f.createTarFileBody()
|
||||
if err != nil {
|
||||
return tarFiles, err
|
||||
}
|
||||
|
||||
sec, err := f.GetInt()
|
||||
if err != nil {
|
||||
return tarFiles, err
|
||||
}
|
||||
nsec, err := f.GetInt()
|
||||
if err != nil {
|
||||
return tarFiles, err
|
||||
}
|
||||
|
||||
hdr := &tar.Header{
|
||||
Name: filename,
|
||||
Size: int64(len(filebody)),
|
||||
Mode: 0o600,
|
||||
ModTime: time.Unix(int64(sec), int64(nsec)),
|
||||
}
|
||||
if err := setTarHeaderTypeflag(hdr, f); err != nil {
|
||||
hdr.Typeflag = tar.TypeReg
|
||||
}
|
||||
if err := setTarHeaderFormat(hdr, f); err != nil {
|
||||
return tarFiles, err // should not happend
|
||||
}
|
||||
tf := &TarFile{
|
||||
Hdr: hdr,
|
||||
Body: filebody,
|
||||
}
|
||||
tarFiles = append(tarFiles, tf)
|
||||
}
|
||||
return tarFiles, nil
|
||||
}
|
||||
|
||||
// CreateFiles creates pseudo-random files in rootDir.
|
||||
// It creates subdirs and places the files there.
|
||||
// It is the callers responsibility to ensure that
|
||||
// rootDir exists.
|
||||
func (f *ConsumeFuzzer) CreateFiles(rootDir string) error {
|
||||
numberOfFiles, err := f.GetInt()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
maxNumberOfFiles := numberOfFiles % 4000 // This is completely arbitrary
|
||||
if maxNumberOfFiles == 0 {
|
||||
return errors.New("maxNumberOfFiles is nil")
|
||||
}
|
||||
|
||||
var noOfCreatedFiles int
|
||||
for i := 0; i < maxNumberOfFiles; i++ {
|
||||
// The file to create:
|
||||
fileName, err := f.GetString()
|
||||
if err != nil {
|
||||
if noOfCreatedFiles > 0 {
|
||||
// If files have been created, we don't return an error.
|
||||
break
|
||||
} else {
|
||||
return errors.New("could not get fileName")
|
||||
}
|
||||
}
|
||||
if strings.Contains(fileName, "..") || (len(fileName) > 0 && fileName[0] == 47) || strings.Contains(fileName, "\\") {
|
||||
continue
|
||||
}
|
||||
fullFilePath := filepath.Join(rootDir, fileName)
|
||||
|
||||
// Find the subdirectory of the file
|
||||
if subDir := filepath.Dir(fileName); subDir != "" && subDir != "." {
|
||||
// create the dir first; avoid going outside the root dir
|
||||
if strings.Contains(subDir, "../") || (len(subDir) > 0 && subDir[0] == 47) || strings.Contains(subDir, "\\") {
|
||||
continue
|
||||
}
|
||||
dirPath := filepath.Join(rootDir, subDir)
|
||||
if _, err := os.Stat(dirPath); os.IsNotExist(err) {
|
||||
err2 := os.MkdirAll(dirPath, 0o777)
|
||||
if err2 != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
fullFilePath = filepath.Join(dirPath, fileName)
|
||||
} else {
|
||||
// Create symlink
|
||||
createSymlink, err := f.GetBool()
|
||||
if err != nil {
|
||||
if noOfCreatedFiles > 0 {
|
||||
break
|
||||
} else {
|
||||
return errors.New("could not create the symlink")
|
||||
}
|
||||
}
|
||||
if createSymlink {
|
||||
symlinkTarget, err := f.GetString()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = os.Symlink(symlinkTarget, fullFilePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// stop loop here, since a symlink needs no further action
|
||||
noOfCreatedFiles++
|
||||
continue
|
||||
}
|
||||
// We create a normal file
|
||||
fileContents, err := f.GetBytes()
|
||||
if err != nil {
|
||||
if noOfCreatedFiles > 0 {
|
||||
break
|
||||
} else {
|
||||
return errors.New("could not create the file")
|
||||
}
|
||||
}
|
||||
err = os.WriteFile(fullFilePath, fileContents, 0o666)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
noOfCreatedFiles++
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetStringFrom returns a string that can only consist of characters
|
||||
// included in possibleChars. It returns an error if the created string
|
||||
// does not have the specified length.
|
||||
func (f *ConsumeFuzzer) GetStringFrom(possibleChars string, length int) (string, error) {
|
||||
if (f.dataTotal - f.position) < uint32(length) {
|
||||
return "", errors.New("not enough bytes to create a string")
|
||||
}
|
||||
output := make([]byte, 0, length)
|
||||
for i := 0; i < length; i++ {
|
||||
charIndex, err := f.GetInt()
|
||||
if err != nil {
|
||||
return string(output), err
|
||||
}
|
||||
output = append(output, possibleChars[charIndex%len(possibleChars)])
|
||||
}
|
||||
return string(output), nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetRune() ([]rune, error) {
|
||||
stringToConvert, err := f.GetString()
|
||||
if err != nil {
|
||||
return []rune("nil"), err
|
||||
}
|
||||
return []rune(stringToConvert), nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetFloat32() (float32, error) {
|
||||
u32, err := f.GetNBytes(4)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
littleEndian, err := f.GetBool()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if littleEndian {
|
||||
u32LE := binary.LittleEndian.Uint32(u32)
|
||||
return math.Float32frombits(u32LE), nil
|
||||
}
|
||||
u32BE := binary.BigEndian.Uint32(u32)
|
||||
return math.Float32frombits(u32BE), nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GetFloat64() (float64, error) {
|
||||
u64, err := f.GetNBytes(8)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
littleEndian, err := f.GetBool()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if littleEndian {
|
||||
u64LE := binary.LittleEndian.Uint64(u64)
|
||||
return math.Float64frombits(u64LE), nil
|
||||
}
|
||||
u64BE := binary.BigEndian.Uint64(u64)
|
||||
return math.Float64frombits(u64BE), nil
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) CreateSlice(targetSlice interface{}) error {
|
||||
return f.GenerateStruct(targetSlice)
|
||||
}
|
62
src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go
generated
vendored
62
src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go
generated
vendored
@@ -1,62 +0,0 @@
|
||||
// Copyright 2023 The go-fuzz-headers Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gofuzzheaders
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
type Continue struct {
|
||||
F *ConsumeFuzzer
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) AddFuncs(fuzzFuncs []interface{}) {
|
||||
for i := range fuzzFuncs {
|
||||
v := reflect.ValueOf(fuzzFuncs[i])
|
||||
if v.Kind() != reflect.Func {
|
||||
panic("Need only funcs!")
|
||||
}
|
||||
t := v.Type()
|
||||
if t.NumIn() != 2 || t.NumOut() != 1 {
|
||||
fmt.Println(t.NumIn(), t.NumOut())
|
||||
|
||||
panic("Need 2 in and 1 out params. In must be the type. Out must be an error")
|
||||
}
|
||||
argT := t.In(0)
|
||||
switch argT.Kind() {
|
||||
case reflect.Ptr, reflect.Map:
|
||||
default:
|
||||
panic("fuzzFunc must take pointer or map type")
|
||||
}
|
||||
if t.In(1) != reflect.TypeOf(Continue{}) {
|
||||
panic("fuzzFunc's second parameter must be type Continue")
|
||||
}
|
||||
f.Funcs[argT] = v
|
||||
}
|
||||
}
|
||||
|
||||
func (f *ConsumeFuzzer) GenerateWithCustom(targetStruct interface{}) error {
|
||||
e := reflect.ValueOf(targetStruct).Elem()
|
||||
return f.fuzzStruct(e, true)
|
||||
}
|
||||
|
||||
func (c Continue) GenerateStruct(targetStruct interface{}) error {
|
||||
return c.F.GenerateStruct(targetStruct)
|
||||
}
|
||||
|
||||
func (c Continue) GenerateStructWithCustom(targetStruct interface{}) error {
|
||||
return c.F.GenerateWithCustom(targetStruct)
|
||||
}
|
556
src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go
generated
vendored
556
src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go
generated
vendored
@@ -1,556 +0,0 @@
|
||||
// Copyright 2023 The go-fuzz-headers Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gofuzzheaders
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// returns a keyword by index
|
||||
func getKeyword(f *ConsumeFuzzer) (string, error) {
|
||||
index, err := f.GetInt()
|
||||
if err != nil {
|
||||
return keywords[0], err
|
||||
}
|
||||
for i, k := range keywords {
|
||||
if i == index {
|
||||
return k, nil
|
||||
}
|
||||
}
|
||||
return keywords[0], fmt.Errorf("could not get a kw")
|
||||
}
|
||||
|
||||
// Simple utility function to check if a string
|
||||
// slice contains a string.
|
||||
func containsString(s []string, e string) bool {
|
||||
for _, a := range s {
|
||||
if a == e {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// These keywords are used specifically for fuzzing Vitess
|
||||
var keywords = []string{
|
||||
"accessible", "action", "add", "after", "against", "algorithm",
|
||||
"all", "alter", "always", "analyze", "and", "as", "asc", "asensitive",
|
||||
"auto_increment", "avg_row_length", "before", "begin", "between",
|
||||
"bigint", "binary", "_binary", "_utf8mb4", "_utf8", "_latin1", "bit",
|
||||
"blob", "bool", "boolean", "both", "by", "call", "cancel", "cascade",
|
||||
"cascaded", "case", "cast", "channel", "change", "char", "character",
|
||||
"charset", "check", "checksum", "coalesce", "code", "collate", "collation",
|
||||
"column", "columns", "comment", "committed", "commit", "compact", "complete",
|
||||
"compressed", "compression", "condition", "connection", "constraint", "continue",
|
||||
"convert", "copy", "cume_dist", "substr", "substring", "create", "cross",
|
||||
"csv", "current_date", "current_time", "current_timestamp", "current_user",
|
||||
"cursor", "data", "database", "databases", "day", "day_hour", "day_microsecond",
|
||||
"day_minute", "day_second", "date", "datetime", "dec", "decimal", "declare",
|
||||
"default", "definer", "delay_key_write", "delayed", "delete", "dense_rank",
|
||||
"desc", "describe", "deterministic", "directory", "disable", "discard",
|
||||
"disk", "distinct", "distinctrow", "div", "double", "do", "drop", "dumpfile",
|
||||
"duplicate", "dynamic", "each", "else", "elseif", "empty", "enable",
|
||||
"enclosed", "encryption", "end", "enforced", "engine", "engines", "enum",
|
||||
"error", "escape", "escaped", "event", "exchange", "exclusive", "exists",
|
||||
"exit", "explain", "expansion", "export", "extended", "extract", "false",
|
||||
"fetch", "fields", "first", "first_value", "fixed", "float", "float4",
|
||||
"float8", "flush", "for", "force", "foreign", "format", "from", "full",
|
||||
"fulltext", "function", "general", "generated", "geometry", "geometrycollection",
|
||||
"get", "global", "gtid_executed", "grant", "group", "grouping", "groups",
|
||||
"group_concat", "having", "header", "high_priority", "hosts", "hour", "hour_microsecond",
|
||||
"hour_minute", "hour_second", "if", "ignore", "import", "in", "index", "indexes",
|
||||
"infile", "inout", "inner", "inplace", "insensitive", "insert", "insert_method",
|
||||
"int", "int1", "int2", "int3", "int4", "int8", "integer", "interval",
|
||||
"into", "io_after_gtids", "is", "isolation", "iterate", "invoker", "join",
|
||||
"json", "json_table", "key", "keys", "keyspaces", "key_block_size", "kill", "lag",
|
||||
"language", "last", "last_value", "last_insert_id", "lateral", "lead", "leading",
|
||||
"leave", "left", "less", "level", "like", "limit", "linear", "lines",
|
||||
"linestring", "load", "local", "localtime", "localtimestamp", "lock", "logs",
|
||||
"long", "longblob", "longtext", "loop", "low_priority", "manifest",
|
||||
"master_bind", "match", "max_rows", "maxvalue", "mediumblob", "mediumint",
|
||||
"mediumtext", "memory", "merge", "microsecond", "middleint", "min_rows", "minute",
|
||||
"minute_microsecond", "minute_second", "mod", "mode", "modify", "modifies",
|
||||
"multilinestring", "multipoint", "multipolygon", "month", "name",
|
||||
"names", "natural", "nchar", "next", "no", "none", "not", "no_write_to_binlog",
|
||||
"nth_value", "ntile", "null", "numeric", "of", "off", "offset", "on",
|
||||
"only", "open", "optimize", "optimizer_costs", "option", "optionally",
|
||||
"or", "order", "out", "outer", "outfile", "over", "overwrite", "pack_keys",
|
||||
"parser", "partition", "partitioning", "password", "percent_rank", "plugins",
|
||||
"point", "polygon", "precision", "primary", "privileges", "processlist",
|
||||
"procedure", "query", "quarter", "range", "rank", "read", "reads", "read_write",
|
||||
"real", "rebuild", "recursive", "redundant", "references", "regexp", "relay",
|
||||
"release", "remove", "rename", "reorganize", "repair", "repeat", "repeatable",
|
||||
"replace", "require", "resignal", "restrict", "return", "retry", "revert",
|
||||
"revoke", "right", "rlike", "rollback", "row", "row_format", "row_number",
|
||||
"rows", "s3", "savepoint", "schema", "schemas", "second", "second_microsecond",
|
||||
"security", "select", "sensitive", "separator", "sequence", "serializable",
|
||||
"session", "set", "share", "shared", "show", "signal", "signed", "slow",
|
||||
"smallint", "spatial", "specific", "sql", "sqlexception", "sqlstate",
|
||||
"sqlwarning", "sql_big_result", "sql_cache", "sql_calc_found_rows",
|
||||
"sql_no_cache", "sql_small_result", "ssl", "start", "starting",
|
||||
"stats_auto_recalc", "stats_persistent", "stats_sample_pages", "status",
|
||||
"storage", "stored", "straight_join", "stream", "system", "vstream",
|
||||
"table", "tables", "tablespace", "temporary", "temptable", "terminated",
|
||||
"text", "than", "then", "time", "timestamp", "timestampadd", "timestampdiff",
|
||||
"tinyblob", "tinyint", "tinytext", "to", "trailing", "transaction", "tree",
|
||||
"traditional", "trigger", "triggers", "true", "truncate", "uncommitted",
|
||||
"undefined", "undo", "union", "unique", "unlock", "unsigned", "update",
|
||||
"upgrade", "usage", "use", "user", "user_resources", "using", "utc_date",
|
||||
"utc_time", "utc_timestamp", "validation", "values", "variables", "varbinary",
|
||||
"varchar", "varcharacter", "varying", "vgtid_executed", "virtual", "vindex",
|
||||
"vindexes", "view", "vitess", "vitess_keyspaces", "vitess_metadata",
|
||||
"vitess_migration", "vitess_migrations", "vitess_replication_status",
|
||||
"vitess_shards", "vitess_tablets", "vschema", "warnings", "when",
|
||||
"where", "while", "window", "with", "without", "work", "write", "xor",
|
||||
"year", "year_month", "zerofill",
|
||||
}
|
||||
|
||||
// Keywords that could get an additional keyword
|
||||
var needCustomString = []string{
|
||||
"DISTINCTROW", "FROM", // Select keywords:
|
||||
"GROUP BY", "HAVING", "WINDOW",
|
||||
"FOR",
|
||||
"ORDER BY", "LIMIT",
|
||||
"INTO", "PARTITION", "AS", // Insert Keywords:
|
||||
"ON DUPLICATE KEY UPDATE",
|
||||
"WHERE", "LIMIT", // Delete keywords
|
||||
"INFILE", "INTO TABLE", "CHARACTER SET", // Load keywords
|
||||
"TERMINATED BY", "ENCLOSED BY",
|
||||
"ESCAPED BY", "STARTING BY",
|
||||
"TERMINATED BY", "STARTING BY",
|
||||
"IGNORE",
|
||||
"VALUE", "VALUES", // Replace tokens
|
||||
"SET", // Update tokens
|
||||
"ENGINE =", // Drop tokens
|
||||
"DEFINER =", "ON SCHEDULE", "RENAME TO", // Alter tokens
|
||||
"COMMENT", "DO", "INITIAL_SIZE = ", "OPTIONS",
|
||||
}
|
||||
|
||||
var alterTableTokens = [][]string{
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
{"CUSTOM_ALTTER_TABLE_OPTIONS"},
|
||||
{"PARTITION_OPTIONS_FOR_ALTER_TABLE"},
|
||||
}
|
||||
|
||||
var alterTokens = [][]string{
|
||||
{
|
||||
"DATABASE", "SCHEMA", "DEFINER = ", "EVENT", "FUNCTION", "INSTANCE",
|
||||
"LOGFILE GROUP", "PROCEDURE", "SERVER",
|
||||
},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
{
|
||||
"ON SCHEDULE", "ON COMPLETION PRESERVE", "ON COMPLETION NOT PRESERVE",
|
||||
"ADD UNDOFILE", "OPTIONS",
|
||||
},
|
||||
{"RENAME TO", "INITIAL_SIZE = "},
|
||||
{"ENABLE", "DISABLE", "DISABLE ON SLAVE", "ENGINE"},
|
||||
{"COMMENT"},
|
||||
{"DO"},
|
||||
}
|
||||
|
||||
var setTokens = [][]string{
|
||||
{"CHARACTER SET", "CHARSET", "CUSTOM_FUZZ_STRING", "NAMES"},
|
||||
{"CUSTOM_FUZZ_STRING", "DEFAULT", "="},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
}
|
||||
|
||||
var dropTokens = [][]string{
|
||||
{"TEMPORARY", "UNDO"},
|
||||
{
|
||||
"DATABASE", "SCHEMA", "EVENT", "INDEX", "LOGFILE GROUP",
|
||||
"PROCEDURE", "FUNCTION", "SERVER", "SPATIAL REFERENCE SYSTEM",
|
||||
"TABLE", "TABLESPACE", "TRIGGER", "VIEW",
|
||||
},
|
||||
{"IF EXISTS"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
{"ON", "ENGINE = ", "RESTRICT", "CASCADE"},
|
||||
}
|
||||
|
||||
var renameTokens = [][]string{
|
||||
{"TABLE"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
{"TO"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
}
|
||||
|
||||
var truncateTokens = [][]string{
|
||||
{"TABLE"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
}
|
||||
|
||||
var createTokens = [][]string{
|
||||
{"OR REPLACE", "TEMPORARY", "UNDO"}, // For create spatial reference system
|
||||
{
|
||||
"UNIQUE", "FULLTEXT", "SPATIAL", "ALGORITHM = UNDEFINED", "ALGORITHM = MERGE",
|
||||
"ALGORITHM = TEMPTABLE",
|
||||
},
|
||||
{
|
||||
"DATABASE", "SCHEMA", "EVENT", "FUNCTION", "INDEX", "LOGFILE GROUP",
|
||||
"PROCEDURE", "SERVER", "SPATIAL REFERENCE SYSTEM", "TABLE", "TABLESPACE",
|
||||
"TRIGGER", "VIEW",
|
||||
},
|
||||
{"IF NOT EXISTS"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
}
|
||||
|
||||
/*
|
||||
// For future use.
|
||||
var updateTokens = [][]string{
|
||||
{"LOW_PRIORITY"},
|
||||
{"IGNORE"},
|
||||
{"SET"},
|
||||
{"WHERE"},
|
||||
{"ORDER BY"},
|
||||
{"LIMIT"},
|
||||
}
|
||||
*/
|
||||
|
||||
var replaceTokens = [][]string{
|
||||
{"LOW_PRIORITY", "DELAYED"},
|
||||
{"INTO"},
|
||||
{"PARTITION"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
{"VALUES", "VALUE"},
|
||||
}
|
||||
|
||||
var loadTokens = [][]string{
|
||||
{"DATA"},
|
||||
{"LOW_PRIORITY", "CONCURRENT", "LOCAL"},
|
||||
{"INFILE"},
|
||||
{"REPLACE", "IGNORE"},
|
||||
{"INTO TABLE"},
|
||||
{"PARTITION"},
|
||||
{"CHARACTER SET"},
|
||||
{"FIELDS", "COLUMNS"},
|
||||
{"TERMINATED BY"},
|
||||
{"OPTIONALLY"},
|
||||
{"ENCLOSED BY"},
|
||||
{"ESCAPED BY"},
|
||||
{"LINES"},
|
||||
{"STARTING BY"},
|
||||
{"TERMINATED BY"},
|
||||
{"IGNORE"},
|
||||
{"LINES", "ROWS"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
}
|
||||
|
||||
// These Are everything that comes after "INSERT"
|
||||
var insertTokens = [][]string{
|
||||
{"LOW_PRIORITY", "DELAYED", "HIGH_PRIORITY", "IGNORE"},
|
||||
{"INTO"},
|
||||
{"PARTITION"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
{"AS"},
|
||||
{"ON DUPLICATE KEY UPDATE"},
|
||||
}
|
||||
|
||||
// These are everything that comes after "SELECT"
|
||||
var selectTokens = [][]string{
|
||||
{"*", "CUSTOM_FUZZ_STRING", "DISTINCTROW"},
|
||||
{"HIGH_PRIORITY"},
|
||||
{"STRAIGHT_JOIN"},
|
||||
{"SQL_SMALL_RESULT", "SQL_BIG_RESULT", "SQL_BUFFER_RESULT"},
|
||||
{"SQL_NO_CACHE", "SQL_CALC_FOUND_ROWS"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
{"FROM"},
|
||||
{"WHERE"},
|
||||
{"GROUP BY"},
|
||||
{"HAVING"},
|
||||
{"WINDOW"},
|
||||
{"ORDER BY"},
|
||||
{"LIMIT"},
|
||||
{"CUSTOM_FUZZ_STRING"},
|
||||
{"FOR"},
|
||||
}
|
||||
|
||||
// These are everything that comes after "DELETE"
|
||||
var deleteTokens = [][]string{
|
||||
{"LOW_PRIORITY", "QUICK", "IGNORE", "FROM", "AS"},
|
||||
{"PARTITION"},
|
||||
{"WHERE"},
|
||||
{"ORDER BY"},
|
||||
{"LIMIT"},
|
||||
}
|
||||
|
||||
var alter_table_options = []string{
|
||||
"ADD", "COLUMN", "FIRST", "AFTER", "INDEX", "KEY", "FULLTEXT", "SPATIAL",
|
||||
"CONSTRAINT", "UNIQUE", "FOREIGN KEY", "CHECK", "ENFORCED", "DROP", "ALTER",
|
||||
"NOT", "INPLACE", "COPY", "SET", "VISIBLE", "INVISIBLE", "DEFAULT", "CHANGE",
|
||||
"CHARACTER SET", "COLLATE", "DISABLE", "ENABLE", "KEYS", "TABLESPACE", "LOCK",
|
||||
"FORCE", "MODIFY", "SHARED", "EXCLUSIVE", "NONE", "ORDER BY", "RENAME COLUMN",
|
||||
"AS", "=", "ASC", "DESC", "WITH", "WITHOUT", "VALIDATION", "ADD PARTITION",
|
||||
"DROP PARTITION", "DISCARD PARTITION", "IMPORT PARTITION", "TRUNCATE PARTITION",
|
||||
"COALESCE PARTITION", "REORGANIZE PARTITION", "EXCHANGE PARTITION",
|
||||
"ANALYZE PARTITION", "CHECK PARTITION", "OPTIMIZE PARTITION", "REBUILD PARTITION",
|
||||
"REPAIR PARTITION", "REMOVE PARTITIONING", "USING", "BTREE", "HASH", "COMMENT",
|
||||
"KEY_BLOCK_SIZE", "WITH PARSER", "AUTOEXTEND_SIZE", "AUTO_INCREMENT", "AVG_ROW_LENGTH",
|
||||
"CHECKSUM", "INSERT_METHOD", "ROW_FORMAT", "DYNAMIC", "FIXED", "COMPRESSED", "REDUNDANT",
|
||||
"COMPACT", "SECONDARY_ENGINE_ATTRIBUTE", "STATS_AUTO_RECALC", "STATS_PERSISTENT",
|
||||
"STATS_SAMPLE_PAGES", "ZLIB", "LZ4", "ENGINE_ATTRIBUTE", "KEY_BLOCK_SIZE", "MAX_ROWS",
|
||||
"MIN_ROWS", "PACK_KEYS", "PASSWORD", "COMPRESSION", "CONNECTION", "DIRECTORY",
|
||||
"DELAY_KEY_WRITE", "ENCRYPTION", "STORAGE", "DISK", "MEMORY", "UNION",
|
||||
}
|
||||
|
||||
// Creates an 'alter table' statement. 'alter table' is an exception
|
||||
// in that it has its own function. The majority of statements
|
||||
// are created by 'createStmt()'.
|
||||
func createAlterTableStmt(f *ConsumeFuzzer) (string, error) {
|
||||
maxArgs, err := f.GetInt()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
maxArgs = maxArgs % 30
|
||||
if maxArgs == 0 {
|
||||
return "", fmt.Errorf("could not create alter table stmt")
|
||||
}
|
||||
|
||||
var stmt strings.Builder
|
||||
stmt.WriteString("ALTER TABLE ")
|
||||
for i := 0; i < maxArgs; i++ {
|
||||
// Calculate if we get existing token or custom string
|
||||
tokenType, err := f.GetInt()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if tokenType%4 == 1 {
|
||||
customString, err := f.GetString()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
stmt.WriteString(" " + customString)
|
||||
} else {
|
||||
tokenIndex, err := f.GetInt()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
stmt.WriteString(" " + alter_table_options[tokenIndex%len(alter_table_options)])
|
||||
}
|
||||
}
|
||||
return stmt.String(), nil
|
||||
}
|
||||
|
||||
func chooseToken(tokens []string, f *ConsumeFuzzer) (string, error) {
|
||||
index, err := f.GetInt()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var token strings.Builder
|
||||
token.WriteString(tokens[index%len(tokens)])
|
||||
if token.String() == "CUSTOM_FUZZ_STRING" {
|
||||
customFuzzString, err := f.GetString()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return customFuzzString, nil
|
||||
}
|
||||
|
||||
// Check if token requires an argument
|
||||
if containsString(needCustomString, token.String()) {
|
||||
customFuzzString, err := f.GetString()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
token.WriteString(" " + customFuzzString)
|
||||
}
|
||||
return token.String(), nil
|
||||
}
|
||||
|
||||
var stmtTypes = map[string][][]string{
	"DELETE":      deleteTokens,
	"INSERT":      insertTokens,
	"SELECT":      selectTokens,
	"LOAD":        loadTokens,
	"REPLACE":     replaceTokens,
	"CREATE":      createTokens,
	"DROP":        dropTokens,
	"RENAME":      renameTokens,
	"TRUNCATE":    truncateTokens,
	"SET":         setTokens,
	"ALTER":       alterTokens,
	"ALTER TABLE": alterTableTokens, // ALTER TABLE has its own set of tokens
}

var stmtTypeEnum = map[int]string{
	0:  "DELETE",
	1:  "INSERT",
	2:  "SELECT",
	3:  "LOAD",
	4:  "REPLACE",
	5:  "CREATE",
	6:  "DROP",
	7:  "RENAME",
	8:  "TRUNCATE",
	9:  "SET",
	10: "ALTER",
	11: "ALTER TABLE",
}

func createStmt(f *ConsumeFuzzer) (string, error) {
	stmtIndex, err := f.GetInt()
	if err != nil {
		return "", err
	}
	stmtIndex = stmtIndex % len(stmtTypes)

	queryType := stmtTypeEnum[stmtIndex]
	tokens := stmtTypes[queryType]

	// We have custom creator for ALTER TABLE
	if queryType == "ALTER TABLE" {
		query, err := createAlterTableStmt(f)
		if err != nil {
			return "", err
		}
		return query, nil
	}

	// Here we are creating a query that is not
	// an 'alter table' query. For available
	// queries, see "stmtTypes"

	// First specify the first query keyword:
	var query strings.Builder
	query.WriteString(queryType)

	// Next create the args for the query:
	queryArgs, err := createStmtArgs(tokens, f)
	if err != nil {
		return "", err
	}
	query.WriteString(" " + queryArgs)
	return query.String(), nil
}

// Creates the arguments of a statement. In a select statement
// that would be everything after "select".
func createStmtArgs(tokenslice [][]string, f *ConsumeFuzzer) (string, error) {
	var query, token strings.Builder

	// We go through the tokens in the tokenslice,
	// create the respective token and add it to
	// "query"
	for _, tokens := range tokenslice {
		// For extra randomization, the fuzzer can
		// choose to not include this token.
		includeThisToken, err := f.GetBool()
		if err != nil {
			return "", err
		}
		if !includeThisToken {
			continue
		}

		// There may be several tokens to choose from:
		if len(tokens) > 1 {
			chosenToken, err := chooseToken(tokens, f)
			if err != nil {
				return "", err
			}
			query.WriteString(" " + chosenToken)
		} else {
			token.WriteString(tokens[0])

			// In case the token is "CUSTOM_FUZZ_STRING"
			// we will then create a non-structured string
			if token.String() == "CUSTOM_FUZZ_STRING" {
				customFuzzString, err := f.GetString()
				if err != nil {
					return "", err
				}
				query.WriteString(" " + customFuzzString)
				continue
			}

			// Check if token requires an argument.
			// Tokens that take an argument can be found
			// in 'needCustomString'. If so, we add a
			// non-structured string to the token.
			if containsString(needCustomString, token.String()) {
				customFuzzString, err := f.GetString()
				if err != nil {
					return "", err
				}
				token.WriteString(fmt.Sprintf(" %s", customFuzzString))
			}
			query.WriteString(fmt.Sprintf(" %s", token.String()))
		}
	}
	return query.String(), nil
}

// Creates a semi-structured query. It creates a string
// that is a combination of the keywords and random strings.
func createQuery(f *ConsumeFuzzer) (string, error) {
	queryLen, err := f.GetInt()
	if err != nil {
		return "", err
	}
	maxLen := queryLen % 60
	if maxLen == 0 {
		return "", fmt.Errorf("could not create a query")
	}
	var query strings.Builder
	for i := 0; i < maxLen; i++ {
		// Get a new token:
		useKeyword, err := f.GetBool()
		if err != nil {
			return "", err
		}
		if useKeyword {
			keyword, err := getKeyword(f)
			if err != nil {
				return "", err
			}
			query.WriteString(" " + keyword)
		} else {
			customString, err := f.GetString()
			if err != nil {
				return "", err
			}
			query.WriteString(" " + customString)
		}
	}
	if query.String() == "" {
		return "", fmt.Errorf("could not create a query")
	}
	return query.String(), nil
}

// GetSQLString is the API that users interact with.
//
// Usage:
//
//	f := NewConsumer(data)
//	sqlString, err := f.GetSQLString()
func (f *ConsumeFuzzer) GetSQLString() (string, error) {
	var query string
	veryStructured, err := f.GetBool()
	if err != nil {
		return "", err
	}
	if veryStructured {
		query, err = createStmt(f)
		if err != nil {
			return "", err
		}
	} else {
		query, err = createQuery(f)
		if err != nil {
			return "", err
		}
	}
	return query, nil
}
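For orientation, a minimal sketch of how a fuzz target might drive this API. The import path is the upstream go-fuzz-headers module vendored here; consumeQuery is a hypothetical stand-in for whatever code is actually under test.

package sqlfuzz

import (
	fuzz "github.com/AdaLogics/go-fuzz-headers"
)

// Fuzz builds a semi-structured SQL string from the raw fuzz input and
// hands it to the code under test (represented here by consumeQuery).
func Fuzz(data []byte) int {
	f := fuzz.NewConsumer(data)
	query, err := f.GetSQLString()
	if err != nil {
		// Not enough input to assemble a query; discard this input.
		return 0
	}
	consumeQuery(query)
	return 1
}

// consumeQuery is a placeholder for the real target, e.g. a SQL parser.
func consumeQuery(q string) { _ = len(q) }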
4 src/cmd/linuxkit/vendor/github.com/containerd/console/console_other.go generated vendored
@@ -1,5 +1,5 @@
//go:build !darwin && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
// +build !darwin,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
//go:build !darwin && !freebsd && !linux && !netbsd && !openbsd && !windows && !zos
// +build !darwin,!freebsd,!linux,!netbsd,!openbsd,!windows,!zos

/*
   Copyright The containerd Authors.
9 src/cmd/linuxkit/vendor/github.com/containerd/console/console_unix.go generated vendored
@@ -31,6 +31,15 @@ func NewPty() (Console, string, error) {
	if err != nil {
		return nil, "", err
	}
	return NewPtyFromFile(f)
}

// NewPtyFromFile creates a new pty pair, just like [NewPty] except that the
// provided [os.File] is used as the master rather than automatically creating
// a new master from /dev/ptmx. The ownership of [os.File] is passed to the
// returned [Console], so the caller must be careful to not call Close on the
// underlying file.
func NewPtyFromFile(f File) (Console, string, error) {
	slave, err := ptsname(f)
	if err != nil {
		return nil, "", err
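A short usage sketch for the new constructor, assuming the caller wants to open the pty master itself; the open flags and error handling are simplified and Linux-specific.

package main

import (
	"fmt"
	"os"

	"github.com/containerd/console"
)

func main() {
	// Open the pty master ourselves instead of letting NewPty do it.
	master, err := os.OpenFile("/dev/ptmx", os.O_RDWR, 0)
	if err != nil {
		panic(err)
	}
	// Ownership of the file passes to the returned Console on success,
	// so only the Console should be closed from here on.
	c, slavePath, err := console.NewPtyFromFile(master)
	if err != nil {
		master.Close()
		panic(err)
	}
	defer c.Close()
	fmt.Println("slave side available at", slavePath)
}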
5 src/cmd/linuxkit/vendor/github.com/containerd/console/tc_darwin.go generated vendored
@@ -18,7 +18,6 @@ package console
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
@@ -30,12 +29,12 @@ const (
|
||||
|
||||
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
||||
// unlockpt should be called before opening the slave side of a pty.
|
||||
func unlockpt(f *os.File) error {
|
||||
func unlockpt(f File) error {
|
||||
return unix.IoctlSetPointerInt(int(f.Fd()), unix.TIOCPTYUNLK, 0)
|
||||
}
|
||||
|
||||
// ptsname retrieves the name of the first available pts for the given master.
|
||||
func ptsname(f *os.File) (string, error) {
|
||||
func ptsname(f File) (string, error) {
|
||||
n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCPTYGNAME)
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
5 src/cmd/linuxkit/vendor/github.com/containerd/console/tc_freebsd_cgo.go generated vendored
@@ -21,7 +21,6 @@ package console
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
@@ -39,7 +38,7 @@ const (
|
||||
|
||||
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
||||
// unlockpt should be called before opening the slave side of a pty.
|
||||
func unlockpt(f *os.File) error {
|
||||
func unlockpt(f File) error {
|
||||
fd := C.int(f.Fd())
|
||||
if _, err := C.unlockpt(fd); err != nil {
|
||||
C.close(fd)
|
||||
@@ -49,7 +48,7 @@ func unlockpt(f *os.File) error {
|
||||
}
|
||||
|
||||
// ptsname retrieves the name of the first available pts for the given master.
|
||||
func ptsname(f *os.File) (string, error) {
|
||||
func ptsname(f File) (string, error) {
|
||||
n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN)
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
5 src/cmd/linuxkit/vendor/github.com/containerd/console/tc_freebsd_nocgo.go generated vendored
@@ -21,7 +21,6 @@ package console
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
@@ -42,12 +41,12 @@ const (
|
||||
|
||||
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
||||
// unlockpt should be called before opening the slave side of a pty.
|
||||
func unlockpt(f *os.File) error {
|
||||
func unlockpt(f File) error {
|
||||
panic("unlockpt() support requires cgo.")
|
||||
}
|
||||
|
||||
// ptsname retrieves the name of the first available pts for the given master.
|
||||
func ptsname(f *os.File) (string, error) {
|
||||
func ptsname(f File) (string, error) {
|
||||
n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN)
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
5 src/cmd/linuxkit/vendor/github.com/containerd/console/tc_linux.go generated vendored
@@ -18,7 +18,6 @@ package console
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
@@ -31,7 +30,7 @@ const (
|
||||
|
||||
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
||||
// unlockpt should be called before opening the slave side of a pty.
|
||||
func unlockpt(f *os.File) error {
|
||||
func unlockpt(f File) error {
|
||||
var u int32
|
||||
// XXX do not use unix.IoctlSetPointerInt here, see commit dbd69c59b81.
|
||||
if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))); err != 0 {
|
||||
@@ -41,7 +40,7 @@ func unlockpt(f *os.File) error {
|
||||
}
|
||||
|
||||
// ptsname retrieves the name of the first available pts for the given master.
|
||||
func ptsname(f *os.File) (string, error) {
|
||||
func ptsname(f File) (string, error) {
|
||||
var u uint32
|
||||
// XXX do not use unix.IoctlGetInt here, see commit dbd69c59b81.
|
||||
if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&u))); err != 0 {
|
||||
|
5 src/cmd/linuxkit/vendor/github.com/containerd/console/tc_netbsd.go generated vendored
@@ -18,7 +18,6 @@ package console
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
@@ -31,12 +30,12 @@ const (
|
||||
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
||||
// unlockpt should be called before opening the slave side of a pty.
|
||||
// This does not exist on NetBSD, it does not allocate controlling terminals on open
|
||||
func unlockpt(f *os.File) error {
|
||||
func unlockpt(f File) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ptsname retrieves the name of the first available pts for the given master.
|
||||
func ptsname(f *os.File) (string, error) {
|
||||
func ptsname(f File) (string, error) {
|
||||
ptm, err := unix.IoctlGetPtmget(int(f.Fd()), unix.TIOCPTSNAME)
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
6 src/cmd/linuxkit/vendor/github.com/containerd/console/tc_openbsd_cgo.go generated vendored
@@ -20,8 +20,6 @@
|
||||
package console
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
@@ -34,7 +32,7 @@ const (
|
||||
)
|
||||
|
||||
// ptsname retrieves the name of the first available pts for the given master.
|
||||
func ptsname(f *os.File) (string, error) {
|
||||
func ptsname(f File) (string, error) {
|
||||
ptspath, err := C.ptsname(C.int(f.Fd()))
|
||||
if err != nil {
|
||||
return "", err
|
||||
@@ -44,7 +42,7 @@ func ptsname(f *os.File) (string, error) {
|
||||
|
||||
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
||||
// unlockpt should be called before opening the slave side of a pty.
|
||||
func unlockpt(f *os.File) error {
|
||||
func unlockpt(f File) error {
|
||||
if _, err := C.grantpt(C.int(f.Fd())); err != nil {
|
||||
return err
|
||||
}
|
||||
|
6 src/cmd/linuxkit/vendor/github.com/containerd/console/tc_openbsd_nocgo.go generated vendored
@@ -29,8 +29,6 @@
|
||||
package console
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
@@ -39,10 +37,10 @@ const (
|
||||
cmdTcSet = unix.TIOCSETA
|
||||
)
|
||||
|
||||
func ptsname(f *os.File) (string, error) {
|
||||
func ptsname(f File) (string, error) {
|
||||
panic("ptsname() support requires cgo.")
|
||||
}
|
||||
|
||||
func unlockpt(f *os.File) error {
|
||||
func unlockpt(f File) error {
|
||||
panic("unlockpt() support requires cgo.")
|
||||
}
|
||||
|
5 src/cmd/linuxkit/vendor/github.com/containerd/console/tc_zos.go generated vendored
@@ -17,7 +17,6 @@
|
||||
package console
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
@@ -29,11 +28,11 @@ const (
|
||||
)
|
||||
|
||||
// unlockpt is a no-op on zos.
|
||||
func unlockpt(_ *os.File) error {
|
||||
func unlockpt(File) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ptsname retrieves the name of the first available pts for the given master.
|
||||
func ptsname(f *os.File) (string, error) {
|
||||
func ptsname(f File) (string, error) {
|
||||
return "/dev/ttyp" + strings.TrimPrefix(f.Name(), "/dev/ptyp"), nil
|
||||
}
|
||||
|
2 src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/content/helpers.go generated vendored
@@ -200,7 +200,7 @@ func Copy(ctx context.Context, cw Writer, or io.Reader, size int64, expected dig
	}
	if size != 0 && copied < size-ws.Offset {
		// Short writes would return its own error, this indicates a read failure
		return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF)
		return fmt.Errorf("short read: expected %d bytes but got %d: %w", size-ws.Offset, copied, io.ErrUnexpectedEOF)
	}
	if err := cw.Commit(ctx, size, expected, opts...); err != nil {
		if errors.Is(err, ErrReset) {
@@ -20,12 +20,14 @@ import (
	"context"
	"fmt"
	"io"
	"slices"

	contentapi "github.com/containerd/containerd/api/services/content/v1"
	"github.com/containerd/errdefs/pkg/errgrpc"
	digest "github.com/opencontainers/go-digest"

	"github.com/containerd/containerd/v2/core/content"
	"github.com/containerd/containerd/v2/defaults"
	"github.com/containerd/containerd/v2/pkg/protobuf"
)

@@ -76,27 +78,31 @@ func (rw *remoteWriter) Digest() digest.Digest {
}

func (rw *remoteWriter) Write(p []byte) (n int, err error) {
	offset := rw.offset
	const maxBufferSize = defaults.DefaultMaxSendMsgSize >> 1
	for data := range slices.Chunk(p, maxBufferSize) {
		offset := rw.offset

		resp, err := rw.send(&contentapi.WriteContentRequest{
			Action: contentapi.WriteAction_WRITE,
			Offset: offset,
			Data:   p,
		})
		if err != nil {
			return 0, fmt.Errorf("failed to send write: %w", errgrpc.ToNative(err))
		}
		resp, err := rw.send(&contentapi.WriteContentRequest{
			Action: contentapi.WriteAction_WRITE,
			Offset: offset,
			Data:   data,
		})
		if err != nil {
			return 0, fmt.Errorf("failed to send write: %w", errgrpc.ToNative(err))
		}

		n = int(resp.Offset - offset)
		if n < len(p) {
			err = io.ErrShortWrite
		}
		written := int(resp.Offset - offset)
		rw.offset += int64(written)
		if resp.Digest != "" {
			rw.digest = digest.Digest(resp.Digest)
		}
		n += written

		rw.offset += int64(n)
		if resp.Digest != "" {
			rw.digest = digest.Digest(resp.Digest)
		if written < len(data) {
			return n, io.ErrShortWrite
		}
	}
	return
	return n, nil
}

func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) (err error) {
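The rewritten Write above relies on slices.Chunk (Go 1.23) to split a payload into bounded sub-slices before sending each over gRPC. A tiny standalone illustration of that iterator, with a hypothetical 4-byte chunk size standing in for defaults.DefaultMaxSendMsgSize >> 1:

package main

import (
	"fmt"
	"slices"
)

func main() {
	payload := []byte("abcdefghij")
	const maxBufferSize = 4 // stand-in for the real send-size limit

	// slices.Chunk yields consecutive sub-slices of at most maxBufferSize
	// bytes, in order and without copying.
	for chunk := range slices.Chunk(payload, maxBufferSize) {
		fmt.Printf("%q\n", chunk) // "abcd", "efgh", "ij"
	}
}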
2 src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/handlers.go generated vendored
@@ -154,8 +154,6 @@ func WalkNotEmpty(ctx context.Context, handler Handler, descs ...ocispec.Descrip
func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error {
	eg, ctx2 := errgroup.WithContext(ctx)
	for _, desc := range descs {
		desc := desc

		if limiter != nil {
			if err := limiter.Acquire(ctx, 1); err != nil {
				return err
4 src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/image.go generated vendored
@@ -369,8 +369,8 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr
		}

		return append([]ocispec.Descriptor{}, index.Manifests...), nil
	} else if !IsLayerType(desc.MediaType) && !IsKnownConfig(desc.MediaType) {
		// Layers and configs are childless data types and should not be logged.
	} else if !IsLayerType(desc.MediaType) && !IsKnownConfig(desc.MediaType) && !IsAttestationType(desc.MediaType) {
		// Layers, configs, and attestations are childless data types and should not be logged.
		log.G(ctx).Debugf("encountered unknown type %v; children may not be fetched", desc.MediaType)
	}
	return nil, nil
21 src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/labels.go generated vendored
@@ -1,21 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package images
|
||||
|
||||
const (
|
||||
ConvertedDockerSchema1LabelKey = "io.containerd.image/converted-docker-schema1"
|
||||
)
|
13 src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/mediatypes.go generated vendored
@@ -58,6 +58,9 @@ const (

	MediaTypeImageLayerEncrypted     = ocispec.MediaTypeImageLayer + "+encrypted"
	MediaTypeImageLayerGzipEncrypted = ocispec.MediaTypeImageLayerGzip + "+encrypted"

	// In-toto attestation
	MediaTypeInToto = "application/vnd.in-toto+json"
)

// DiffCompression returns the compression as defined by the layer diff media
@@ -193,6 +196,16 @@ func IsKnownConfig(mt string) bool {
	return false
}

// IsAttestationType returns true if the media type is an attestation type
func IsAttestationType(mt string) bool {
	switch mt {
	case MediaTypeInToto:
		return true
	default:
		return false
	}
}

// ChildGCLabels returns the label for a given descriptor to reference it
func ChildGCLabels(desc ocispec.Descriptor) []string {
	mt := desc.MediaType
@@ -78,10 +78,10 @@ type TokenOptions struct {
	// FetchRefreshToken enables fetching a refresh token (aka "identity token", "offline token") along with the bearer token.
	//
	// For HTTP GET mode (FetchToken), FetchRefreshToken sets `offline_token=true` in the request.
	// https://docs.docker.com/registry/spec/auth/token/#requesting-a-token
	// https://distribution.github.io/distribution/spec/auth/token/#requesting-a-token
	//
	// For HTTP POST mode (FetchTokenWithOAuth), FetchRefreshToken sets `access_type=offline` in the request.
	// https://docs.docker.com/registry/spec/auth/oauth/#getting-a-token
	// https://distribution.github.io/distribution/spec/auth/oauth/#getting-a-token
	FetchRefreshToken bool
}
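For reference, a sketch of the two request shapes the FetchRefreshToken comment above describes; the service and scope values are placeholders.

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// HTTP GET mode: the refresh token is requested via offline_token=true.
	get := url.Values{}
	get.Set("service", "registry.example.com")
	get.Set("scope", "repository:library/busybox:pull")
	get.Set("offline_token", "true")
	fmt.Println("GET  /token?" + get.Encode())

	// HTTP POST (OAuth2) mode: the refresh token is requested via access_type=offline.
	post := url.Values{}
	post.Set("grant_type", "password")
	post.Set("service", "registry.example.com")
	post.Set("scope", "repository:library/busybox:pull")
	post.Set("access_type", "offline")
	fmt.Println("POST body: " + post.Encode())
}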
@@ -70,9 +70,18 @@ func WithAuthCreds(creds func(string) (string, string, error)) AuthorizerOpt {
}

// WithAuthHeader provides HTTP headers for authorization
//
// We need to merge instead of replacing because header may be set by
// a per-host hosts.toml or/AND by a global header config (e.g., cri.config.headers)
func WithAuthHeader(hdr http.Header) AuthorizerOpt {
	return func(opt *authorizerConfig) {
		opt.header = hdr
		if opt.header == nil {
			opt.header = hdr.Clone()
		} else {
			for k, v := range hdr {
				opt.header[k] = append(opt.header[k], v...)
			}
		}
	}
}
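A small illustration of why merging matters here: values appended per key survive from both sources, instead of the second source replacing the first wholesale. The header names are made up; the merge loop mirrors WithAuthHeader above.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	global := http.Header{"X-Meta": {"from-global"}}
	perHost := http.Header{"X-Meta": {"from-hosts-toml"}, "Authorization": {"Bearer abc"}}

	merged := global.Clone()
	for k, v := range perHost {
		merged[k] = append(merged[k], v...)
	}
	fmt.Println(merged["X-Meta"]) // [from-global from-hosts-toml]
}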

@@ -88,7 +97,7 @@ func WithFetchRefreshToken(f OnFetchRefreshToken) AuthorizerOpt {

// NewDockerAuthorizer creates an authorizer using Docker's registry
// authentication spec.
// See https://docs.docker.com/registry/spec/auth/
// See https://distribution.github.io/distribution/spec/auth/
func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer {
	var ao authorizerConfig
	for _, opt := range opts {
@@ -269,7 +278,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st

	to.Scopes = GetTokenScopes(ctx, to.Scopes)

	// Docs: https://docs.docker.com/registry/spec/auth/scope
	// Docs: https://distribution.github.io/distribution/spec/auth/scope/
	scoped := strings.Join(to.Scopes, " ")

	// Keep track of the expiration time of cached bearer tokens so they can be
@@ -1,54 +0,0 @@
|
||||
//go:build gofuzz
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
fuzz "github.com/AdaLogics/go-fuzz-headers"
|
||||
"github.com/containerd/containerd/v2/plugins/content/local"
|
||||
"github.com/containerd/log"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
func FuzzConvertManifest(data []byte) int {
|
||||
ctx := context.Background()
|
||||
|
||||
// Do not log the message below
|
||||
// level=warning msg="do nothing for media type: ..."
|
||||
log.G(ctx).Logger.SetLevel(log.PanicLevel)
|
||||
|
||||
f := fuzz.NewConsumer(data)
|
||||
desc := ocispec.Descriptor{}
|
||||
err := f.GenerateStruct(&desc)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
tmpdir, err := os.MkdirTemp("", "fuzzing-")
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
cs, err := local.NewStore(tmpdir)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
_, _ = ConvertManifest(ctx, cs, desc)
|
||||
return 1
|
||||
}
|
@@ -18,8 +18,12 @@ package docker

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/http"
	"strings"

	remoteerrors "github.com/containerd/containerd/v2/core/remotes/errors"
)

// ErrorCoder is the base interface for ErrorCode and Error allowing
@@ -281,3 +285,21 @@ func (errs *Errors) UnmarshalJSON(data []byte) error {
	*errs = newErrs
	return nil
}

func unexpectedResponseErr(resp *http.Response) (retErr error) {
	retErr = remoteerrors.NewUnexpectedStatusErr(resp)

	// Decode registry error if provided
	if rerr := retErr.(remoteerrors.ErrUnexpectedStatus); len(rerr.Body) > 0 {
		var registryErr Errors
		if err := json.Unmarshal(rerr.Body, &registryErr); err == nil && registryErr.Len() > 0 {
			// Join the unexpected error with the typed errors, when printed it will
			// show the unexpected error message and the registry errors. The body
			// is always excluded from the unexpected error message. This also allows
			// clients to decode into either type.
			retErr = errors.Join(rerr, registryErr)
		}
	}

	return
}
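Because unexpectedResponseErr joins the raw status error with the decoded registry errors, callers can match on either one. A small standalone illustration of that property of errors.Join, using made-up error types in place of the real ones:

package main

import (
	"errors"
	"fmt"
)

type statusErr struct{ code int }

func (e statusErr) Error() string { return fmt.Sprintf("unexpected status %d", e.code) }

type registryErr struct{ msg string }

func (e registryErr) Error() string { return "registry: " + e.msg }

func main() {
	// Stand-ins for remoteerrors.ErrUnexpectedStatus and the typed Errors value.
	joined := errors.Join(statusErr{code: 403}, registryErr{msg: "denied: access forbidden"})

	var s statusErr
	var r registryErr
	fmt.Println(errors.As(joined, &s), s.code) // true 403
	fmt.Println(errors.As(joined, &r), r.msg)  // true denied: access forbidden
	fmt.Println(joined)                        // both messages, one per line
}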
405 src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher.go generated vendored
@@ -17,26 +17,202 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/flate"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/core/remotes"
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/containerd/log"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/core/remotes"
|
||||
)
|
||||
|
||||
type bufferPool struct {
|
||||
pool *sync.Pool
|
||||
}
|
||||
|
||||
func newbufferPool(bufCap int64) *bufferPool {
|
||||
pool := &sync.Pool{
|
||||
New: func() any {
|
||||
return bytes.NewBuffer(make([]byte, 0, bufCap))
|
||||
},
|
||||
}
|
||||
return &bufferPool{
|
||||
pool: pool,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *bufferPool) Get() *bytes.Buffer {
|
||||
buf := p.pool.Get().(*bytes.Buffer)
|
||||
return buf
|
||||
}
|
||||
|
||||
func (p *bufferPool) Put(buffer *bytes.Buffer) {
|
||||
p.pool.Put(buffer)
|
||||
}
|
||||
|
||||
var ErrClosedPipe = errors.New("bufpipe: read/write on closed pipe")
|
||||
|
||||
// pipe implements an asynchronous buffered pipe designed for high-throughput
|
||||
// I/O with configurable initial buffer sizes and buffer reuse. It decouples
|
||||
// read/write operations, allowing writers to proceed without blocking (unless
|
||||
// the pipe is closed) and readers to wait efficiently for incoming data.
|
||||
//
|
||||
// Key Characteristics:
|
||||
// - Asynchronous Operation: Writers populate buffers independently of read
|
||||
// timing, enabling continuous data flow without reader-writer synchronization.
|
||||
// - Dynamic Buffering: Active buffer grows organically to handle large payloads, while
|
||||
// the initial capacity (bufCap) balances memory pre-allocation and growth overhead.
|
||||
// - Buffer Recycling: Retrieved from/pushed to a pool to minimize allocations, reducing
|
||||
// garbage collection pressure in sustained I/O scenarios.
|
||||
// - Error Semantics: Closes deterministically on first error (read or write), propagating
|
||||
// errors atomically to both ends while draining buffered data.
|
||||
//
|
||||
// Difference with io.Pipe:
|
||||
// - Unlike io.Pipe's strict synchronization (blocking write until read), this implementation
|
||||
// allows writers to buffer data ahead of reads, improving throughput for bursty workloads.
|
||||
//
|
||||
// Synchronization & Internals:
|
||||
// - Condition Variable (sync.Cond): Coordinates reader/writer, waking readers on new data
|
||||
// or closure. Locking is centralized via the condition's mutex.
|
||||
// - Buffer Lifecycle: Active buffer serves writes until read depletion, after which it's
|
||||
// recycled to the pool. Pooled buffers retain their capacity across uses.
|
||||
// - Error Handling: Write errors (werr) permanently fail writes; read errors (rerr) mark
|
||||
// terminal read state after buffer exhaustion.
|
||||
//
|
||||
// Future Considerations:
|
||||
// - Zero-copy reads/writes to avoid buffer copying overhead.
|
||||
// - Memory-mapped file backing for multi-gigabyte payloads.
|
||||
type pipe struct {
|
||||
cond *sync.Cond // Coordinates read/write signaling via Lock+Wait/Signal
|
||||
bufPool *bufferPool // Reusable buffers with initial capacity bufCap
|
||||
buf *bytes.Buffer // Active data buffer (nil when empty/returned to pool)
|
||||
rerr, werr error // Terminal read/write errors (sticky once set)
|
||||
}
|
||||
|
||||
type pipeReader struct {
|
||||
*pipe
|
||||
}
|
||||
|
||||
type pipeWriter struct {
|
||||
*pipe
|
||||
}
|
||||
|
||||
func newPipeWriter(bufPool *bufferPool) (*pipeReader, *pipeWriter) {
|
||||
p := &pipe{
|
||||
cond: sync.NewCond(new(sync.Mutex)),
|
||||
bufPool: bufPool,
|
||||
buf: nil,
|
||||
}
|
||||
return &pipeReader{
|
||||
pipe: p,
|
||||
}, &pipeWriter{
|
||||
pipe: p,
|
||||
}
|
||||
}
|
||||
|
||||
// Read implements the standard Read interface: it reads data from the pipe,
|
||||
// reading from the internal buffer, otherwise blocking until a writer arrives
|
||||
// or the write end is closed. If the write end is closed with an error, that
|
||||
// error is returned as err; otherwise err is io.EOF.
|
||||
func (r *pipeReader) Read(data []byte) (n int, err error) {
|
||||
r.cond.L.Lock()
|
||||
defer r.cond.L.Unlock()
|
||||
|
||||
if r.buf == nil {
|
||||
r.buf = r.bufPool.Get()
|
||||
}
|
||||
|
||||
for {
|
||||
n, err = r.buf.Read(data)
|
||||
// If not closed and no read, wait for writing.
|
||||
if err == io.EOF && r.rerr == nil && n == 0 {
|
||||
r.cond.Wait() // Wait for data to be written
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
if err == io.EOF {
|
||||
// Put buffer back to pool
|
||||
r.bufPool.Put(r.buf)
|
||||
r.buf = nil
|
||||
return n, r.rerr
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Close closes the reader; subsequent writes from the write half of the pipe
|
||||
// will return error ErrClosedPipe.
|
||||
func (r *pipeReader) Close() error {
|
||||
return r.CloseWithError(nil)
|
||||
}
|
||||
|
||||
// CloseWithError closes the reader; subsequent writes to the write half of the
|
||||
// pipe will return the error err.
|
||||
func (r *pipeReader) CloseWithError(err error) error {
|
||||
r.cond.L.Lock()
|
||||
defer r.cond.L.Unlock()
|
||||
|
||||
if err == nil {
|
||||
err = ErrClosedPipe
|
||||
}
|
||||
r.werr = err
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write implements the standard Write interface: it writes data to the internal
|
||||
// buffer. If the read end is closed with an error, that err is returned as err;
|
||||
// otherwise err is ErrClosedPipe.
|
||||
func (w *pipeWriter) Write(data []byte) (int, error) {
|
||||
w.cond.L.Lock()
|
||||
defer w.cond.L.Unlock()
|
||||
|
||||
if w.werr != nil {
|
||||
return 0, w.werr
|
||||
}
|
||||
|
||||
if w.buf == nil {
|
||||
w.buf = w.bufPool.Get()
|
||||
}
|
||||
|
||||
n, err := w.buf.Write(data)
|
||||
w.cond.Signal()
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Close closes the writer; subsequent reads from the read half of the pipe will
|
||||
// return io.EOF once the internal buffer get empty.
|
||||
func (w *pipeWriter) Close() error {
|
||||
return w.CloseWithError(nil)
|
||||
}
|
||||
|
||||
// Close closes the writer; subsequent reads from the read half of the pipe will
|
||||
// return err once the internal buffer get empty.
|
||||
func (w *pipeWriter) CloseWithError(err error) error {
|
||||
w.cond.L.Lock()
|
||||
defer w.cond.L.Unlock()
|
||||
|
||||
if err == nil {
|
||||
err = io.EOF
|
||||
}
|
||||
w.rerr = err
|
||||
w.cond.Broadcast()
|
||||
return nil
|
||||
}
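A package-internal sketch of how the pipe halves above are meant to be used: a producer goroutine writes without blocking on the reader, then closes, while the consumer drains until EOF. The types are unexported, so this could only live in the same package (which already imports io); the buffer capacity is arbitrary and the helper name is hypothetical.

// examplePipeUsage round-trips a payload through the buffered pipe.
func examplePipeUsage(payload []byte) ([]byte, error) {
	bufPool := newbufferPool(32 * 1024)
	pr, pw := newPipeWriter(bufPool)

	go func() {
		_, err := pw.Write(payload)
		// Closing with a nil error surfaces io.EOF to the reader once the
		// buffered data has been drained.
		_ = pw.CloseWithError(err)
	}()

	return io.ReadAll(pr)
}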
|
||||
|
||||
type dockerFetcher struct {
|
||||
*dockerBase
|
||||
}
|
||||
@@ -84,7 +260,7 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
|
||||
req.path = req.path + "?" + u.RawQuery
|
||||
}
|
||||
|
||||
rc, err := r.open(ctx, req, desc.MediaType, offset)
|
||||
rc, err := r.open(ctx, req, desc.MediaType, offset, false)
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
continue // try one of the other urls.
|
||||
@@ -97,17 +273,16 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
|
||||
}
|
||||
|
||||
// Try manifests endpoints for manifests types
|
||||
if images.IsManifestType(desc.MediaType) || images.IsIndexType(desc.MediaType) ||
|
||||
desc.MediaType == images.MediaTypeDockerSchema1Manifest {
|
||||
if images.IsManifestType(desc.MediaType) || images.IsIndexType(desc.MediaType) {
|
||||
|
||||
var firstErr error
|
||||
for _, host := range r.hosts {
|
||||
for i, host := range r.hosts {
|
||||
req := r.request(host, http.MethodGet, "manifests", desc.Digest.String())
|
||||
if err := req.addNamespace(r.refspec.Hostname()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rc, err := r.open(ctx, req, desc.MediaType, offset)
|
||||
rc, err := r.open(ctx, req, desc.MediaType, offset, i == len(r.hosts)-1)
|
||||
if err != nil {
|
||||
// Store the error for referencing later
|
||||
if firstErr == nil {
|
||||
@@ -124,13 +299,13 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
|
||||
|
||||
// Finally use blobs endpoints
|
||||
var firstErr error
|
||||
for _, host := range r.hosts {
|
||||
for i, host := range r.hosts {
|
||||
req := r.request(host, http.MethodGet, "blobs", desc.Digest.String())
|
||||
if err := req.addNamespace(r.refspec.Hostname()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rc, err := r.open(ctx, req, desc.MediaType, offset)
|
||||
rc, err := r.open(ctx, req, desc.MediaType, offset, i == len(r.hosts)-1)
|
||||
if err != nil {
|
||||
// Store the error for referencing later
|
||||
if firstErr == nil {
|
||||
@@ -153,7 +328,7 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
|
||||
})
|
||||
}
|
||||
|
||||
func (r dockerFetcher) createGetReq(ctx context.Context, host RegistryHost, mediatype string, ps ...string) (*request, int64, error) {
|
||||
func (r dockerFetcher) createGetReq(ctx context.Context, host RegistryHost, lastHost bool, mediatype string, ps ...string) (*request, int64, error) {
|
||||
headReq := r.request(host, http.MethodHead, ps...)
|
||||
if err := headReq.addNamespace(r.refspec.Hostname()); err != nil {
|
||||
return nil, 0, err
|
||||
@@ -165,7 +340,7 @@ func (r dockerFetcher) createGetReq(ctx context.Context, host RegistryHost, medi
|
||||
headReq.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", "))
|
||||
}
|
||||
|
||||
headResp, err := headReq.doWithRetries(ctx, nil)
|
||||
headResp, err := headReq.doWithRetries(ctx, lastHost)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
@@ -209,8 +384,8 @@ func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest, op
|
||||
firstErr error
|
||||
)
|
||||
|
||||
for _, host := range r.hosts {
|
||||
getReq, sz, err = r.createGetReq(ctx, host, config.Mediatype, "blobs", dgst.String())
|
||||
for i, host := range r.hosts {
|
||||
getReq, sz, err = r.createGetReq(ctx, host, i == len(r.hosts)-1, config.Mediatype, "blobs", dgst.String())
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
@@ -222,8 +397,8 @@ func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest, op
|
||||
|
||||
if getReq == nil {
|
||||
// Fall back to the "manifests" endpoint
|
||||
for _, host := range r.hosts {
|
||||
getReq, sz, err = r.createGetReq(ctx, host, config.Mediatype, "manifests", dgst.String())
|
||||
for i, host := range r.hosts {
|
||||
getReq, sz, err = r.createGetReq(ctx, host, i == len(r.hosts)-1, config.Mediatype, "manifests", dgst.String())
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
@@ -245,7 +420,7 @@ func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest, op
|
||||
}
|
||||
|
||||
seeker, err := newHTTPReadSeeker(sz, func(offset int64) (io.ReadCloser, error) {
|
||||
return r.open(ctx, getReq, config.Mediatype, offset)
|
||||
return r.open(ctx, getReq, config.Mediatype, offset, true)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, desc, err
|
||||
@@ -262,80 +437,148 @@ func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest, op
|
||||
return seeker, desc, nil
|
||||
}
|
||||
|
||||
func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (_ io.ReadCloser, retErr error) {
|
||||
if mediatype == "" {
|
||||
req.header.Set("Accept", "*/*")
|
||||
} else {
|
||||
req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", "))
|
||||
func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64, lastHost bool) (_ io.ReadCloser, retErr error) {
|
||||
const minChunkSize = 512
|
||||
|
||||
chunkSize := int64(r.performances.ConcurrentLayerFetchBuffer)
|
||||
parallelism := int64(r.performances.MaxConcurrentDownloads)
|
||||
if chunkSize < minChunkSize || req.body != nil {
|
||||
parallelism = 1
|
||||
}
|
||||
log.G(ctx).WithField("initial_parallelism", r.performances.MaxConcurrentDownloads).
|
||||
WithField("parallelism", parallelism).
|
||||
WithField("chunk_size", chunkSize).
|
||||
WithField("offset", offset).
|
||||
Debug("fetching layer")
|
||||
req.setMediaType(mediatype)
|
||||
req.header.Set("Accept-Encoding", "zstd;q=1.0, gzip;q=0.8, deflate;q=0.5")
|
||||
|
||||
if offset > 0 {
|
||||
// Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints
|
||||
// will return the header without supporting the range. The content
|
||||
// range must always be checked.
|
||||
req.header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
|
||||
if parallelism > 1 || offset > 0 {
|
||||
req.setOffset(offset)
|
||||
}
|
||||
|
||||
resp, err := req.doWithRetries(ctx, nil)
|
||||
if err != nil {
|
||||
if err := r.Acquire(ctx, 1); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if retErr != nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
if resp.StatusCode > 299 {
|
||||
// TODO(stevvooe): When doing a offset specific request, we should
|
||||
// really distinguish between a 206 and a 200. In the case of 200, we
|
||||
// can discard the bytes, hiding the seek behavior from the
|
||||
// implementation.
|
||||
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
return nil, fmt.Errorf("content at %v not found: %w", req.String(), errdefs.ErrNotFound)
|
||||
}
|
||||
var registryErr Errors
|
||||
if err := json.NewDecoder(resp.Body).Decode(®istryErr); err != nil || registryErr.Len() < 1 {
|
||||
return nil, fmt.Errorf("unexpected status code %v: %v", req.String(), resp.Status)
|
||||
}
|
||||
return nil, fmt.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error())
|
||||
}
|
||||
if offset > 0 {
|
||||
cr := resp.Header.Get("content-range")
|
||||
if cr != "" {
|
||||
if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) {
|
||||
return nil, fmt.Errorf("unhandled content range in response: %v", cr)
|
||||
|
||||
}
|
||||
} else {
|
||||
// TODO: Should any cases where use of content range
|
||||
// without the proper header be considered?
|
||||
// 206 responses?
|
||||
|
||||
// Discard up to offset
|
||||
// Could use buffer pool here but this case should be rare
|
||||
n, err := io.Copy(io.Discard, io.LimitReader(resp.Body, offset))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to discard to offset: %w", err)
|
||||
}
|
||||
if n != offset {
|
||||
return nil, errors.New("unable to discard to offset")
|
||||
}
|
||||
|
||||
resp, err := req.doWithRetries(ctx, lastHost, withErrorCheck, withOffsetCheck(offset))
|
||||
switch err {
|
||||
case nil:
|
||||
// all good
|
||||
case errContentRangeIgnored:
|
||||
if parallelism != 1 {
|
||||
log.G(ctx).WithError(err).Info("remote host ignored content range, forcing parallelism to 1")
|
||||
parallelism = 1
|
||||
}
|
||||
default:
|
||||
log.G(ctx).WithError(err).Debug("fetch failed")
|
||||
r.Release(1)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
body := resp.Body
|
||||
encoding := strings.FieldsFunc(resp.Header.Get("Content-Encoding"), func(r rune) bool {
|
||||
return r == ' ' || r == '\t' || r == ','
|
||||
})
|
||||
|
||||
remaining, _ := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 0)
|
||||
if remaining <= chunkSize {
|
||||
parallelism = 1
|
||||
}
|
||||
|
||||
if parallelism > 1 {
|
||||
// If we have a content length, we can use multiple requests to fetch
|
||||
// the content in parallel. This will make download of bigger bodies
|
||||
// faster, at the cost of parallelism more requests and max
|
||||
// ~(max_parallelism * goroutine footprint) memory usage. The goroutine
|
||||
// footprint should be: the goroutine stack + pipe buffer size
|
||||
numChunks := remaining / chunkSize
|
||||
if numChunks*chunkSize < remaining {
|
||||
numChunks++
|
||||
}
|
||||
if numChunks < parallelism {
|
||||
parallelism = numChunks
|
||||
}
|
||||
queue := make(chan int64, parallelism)
|
||||
ctx, cancelCtx := context.WithCancel(ctx)
|
||||
done := ctx.Done()
|
||||
readers, writers := make([]io.Reader, numChunks), make([]*pipeWriter, numChunks)
|
||||
bufPool := newbufferPool(chunkSize)
|
||||
for i := range numChunks {
|
||||
readers[i], writers[i] = newPipeWriter(bufPool)
|
||||
}
|
||||
go func() {
|
||||
for i := range numChunks {
|
||||
select {
|
||||
case queue <- i:
|
||||
case <-done:
|
||||
return // avoid leaking a goroutine if we exit early.
|
||||
}
|
||||
}
|
||||
close(queue)
|
||||
}()
|
||||
r.Release(1)
|
||||
for range parallelism {
|
||||
go func() {
|
||||
for i := range queue { // first in first out
|
||||
copy := func() error {
|
||||
if err := r.Acquire(ctx, 1); err != nil {
|
||||
return err
|
||||
}
|
||||
defer r.Release(1)
|
||||
var body io.ReadCloser
|
||||
if i == 0 {
|
||||
body = resp.Body
|
||||
} else {
|
||||
reqClone := req.clone()
|
||||
reqClone.setOffset(offset + i*chunkSize)
|
||||
nresp, err := reqClone.doWithRetries(ctx, lastHost, withErrorCheck)
|
||||
if err != nil {
|
||||
_ = writers[i].CloseWithError(err)
|
||||
select {
|
||||
case <-done:
|
||||
return ctx.Err()
|
||||
default:
|
||||
cancelCtx()
|
||||
}
|
||||
return err
|
||||
}
|
||||
body = nresp.Body
|
||||
}
|
||||
_, err := io.Copy(writers[i], io.LimitReader(body, chunkSize))
|
||||
_ = body.Close()
|
||||
_ = writers[i].CloseWithError(err)
|
||||
if err != nil && err != io.EOF {
|
||||
cancelCtx()
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if copy() != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
body = &fnOnClose{
|
||||
BeforeClose: func() {
|
||||
cancelCtx()
|
||||
},
|
||||
ReadCloser: io.NopCloser(io.MultiReader(readers...)),
|
||||
}
|
||||
} else {
|
||||
body = &fnOnClose{
|
||||
BeforeClose: func() {
|
||||
r.Release(1)
|
||||
},
|
||||
ReadCloser: body,
|
||||
}
|
||||
}
|
||||
for i := len(encoding) - 1; i >= 0; i-- {
|
||||
algorithm := strings.ToLower(encoding[i])
|
||||
switch algorithm {
|
||||
case "zstd":
|
||||
r, err := zstd.NewReader(body)
|
||||
r, err := zstd.NewReader(body,
|
||||
zstd.WithDecoderLowmem(false),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -356,3 +599,17 @@ func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string,
|
||||
|
||||
return body, nil
|
||||
}
|
||||
|
||||
type fnOnClose struct {
|
||||
BeforeClose func()
|
||||
io.ReadCloser
|
||||
}
|
||||
|
||||
// Close calls the BeforeClose function before closing the underlying
|
||||
// ReadCloser.
|
||||
func (f *fnOnClose) Close() error {
|
||||
f.BeforeClose()
|
||||
return f.ReadCloser.Close()
|
||||
}
|
||||
|
||||
var _ io.ReadCloser = &fnOnClose{}
|
||||
|
@@ -1,75 +0,0 @@
|
||||
//go:build gofuzz
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
func FuzzFetcher(data []byte) int {
|
||||
dataLen := len(data)
|
||||
if dataLen == 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.Header().Set("content-range", fmt.Sprintf("bytes %d-%d/%d", 0, dataLen-1, dataLen))
|
||||
rw.Header().Set("content-length", strconv.Itoa(dataLen))
|
||||
rw.Write(data)
|
||||
}))
|
||||
defer s.Close()
|
||||
|
||||
u, err := url.Parse(s.URL)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
f := dockerFetcher{&dockerBase{
|
||||
repository: "nonempty",
|
||||
}}
|
||||
host := RegistryHost{
|
||||
Client: s.Client(),
|
||||
Host: u.Host,
|
||||
Scheme: u.Scheme,
|
||||
Path: u.Path,
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
req := f.request(host, http.MethodGet)
|
||||
rc, err := f.open(ctx, req, "", 0)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
b, err := io.ReadAll(rc)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
expected := data
|
||||
if len(b) != len(expected) {
|
||||
panic("len of request is not equal to len of expected but should be")
|
||||
}
|
||||
return 1
|
||||
}
|
@@ -28,14 +28,14 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/core/remotes"
|
||||
remoteserrors "github.com/containerd/containerd/v2/core/remotes/errors"
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/containerd/log"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/core/remotes"
|
||||
)
|
||||
|
||||
type dockerPusher struct {
|
||||
@@ -115,7 +115,7 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
|
||||
|
||||
log.G(ctx).WithField("url", req.String()).Debugf("checking and pushing to")
|
||||
|
||||
resp, err := req.doWithRetries(ctx, nil)
|
||||
resp, err := req.doWithRetries(ctx, true)
|
||||
if err != nil {
|
||||
if !errors.Is(err, ErrInvalidAuthorization) {
|
||||
return nil, err
|
||||
@@ -148,8 +148,8 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
|
||||
return nil, fmt.Errorf("content %v on remote: %w", desc.Digest, errdefs.ErrAlreadyExists)
|
||||
}
|
||||
} else if resp.StatusCode != http.StatusNotFound {
|
||||
err := remoteserrors.NewUnexpectedStatusErr(resp)
|
||||
log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response")
|
||||
err := unexpectedResponseErr(resp)
|
||||
log.G(ctx).WithError(err).Debug("unexpected response")
|
||||
resp.Body.Close()
|
||||
return nil, err
|
||||
}
|
||||
@@ -176,24 +176,28 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
|
||||
//
|
||||
// for the private repo, we should remove mount-from
|
||||
// query and send the request again.
|
||||
resp, err = preq.doWithRetries(pctx, nil)
|
||||
resp, err = preq.doWithRetries(pctx, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if !errors.Is(err, ErrInvalidAuthorization) {
|
||||
return nil, fmt.Errorf("pushing with mount from %s: %w", fromRepo, err)
|
||||
}
|
||||
log.G(ctx).Debugf("failed to push with mount from repository %s: %v", fromRepo, err)
|
||||
}
|
||||
if resp != nil {
|
||||
switch resp.StatusCode {
|
||||
case http.StatusUnauthorized:
|
||||
log.G(ctx).Debugf("failed to mount from repository %s, not authorized", fromRepo)
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusUnauthorized:
|
||||
log.G(ctx).Debugf("failed to mount from repository %s", fromRepo)
|
||||
|
||||
resp.Body.Close()
|
||||
resp = nil
|
||||
case http.StatusCreated:
|
||||
mountedFrom = path.Join(p.refspec.Hostname(), fromRepo)
|
||||
resp.Body.Close()
|
||||
resp = nil
|
||||
case http.StatusCreated:
|
||||
mountedFrom = path.Join(p.refspec.Hostname(), fromRepo)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if resp == nil {
|
||||
resp, err = req.doWithRetries(ctx, nil)
|
||||
resp, err = req.doWithRetries(ctx, true)
|
||||
if err != nil {
|
||||
if errors.Is(err, ErrInvalidAuthorization) {
|
||||
return nil, fmt.Errorf("push access denied, repository does not exist or may require authorization: %w", err)
|
||||
@@ -219,8 +223,8 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
|
||||
})
|
||||
return nil, fmt.Errorf("content %v on remote: %w", desc.Digest, errdefs.ErrAlreadyExists)
|
||||
default:
|
||||
err := remoteserrors.NewUnexpectedStatusErr(resp)
|
||||
log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response")
|
||||
err := unexpectedResponseErr(resp)
|
||||
log.G(ctx).WithError(err).Debug("unexpected response")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -285,7 +289,7 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
|
||||
req.size = desc.Size
|
||||
|
||||
go func() {
|
||||
resp, err := req.doWithRetries(ctx, nil)
|
||||
resp, err := req.doWithRetries(ctx, true)
|
||||
if err != nil {
|
||||
pushw.setError(err)
|
||||
return
|
||||
@@ -294,8 +298,8 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK, http.StatusCreated, http.StatusNoContent:
|
||||
default:
|
||||
err := remoteserrors.NewUnexpectedStatusErr(resp)
|
||||
log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response")
|
||||
err := unexpectedResponseErr(resp)
|
||||
log.G(ctx).WithError(err).Debug("unexpected response")
|
||||
pushw.setError(err)
|
||||
return
|
||||
}
|
||||
@@ -477,13 +481,15 @@ func (pw *pushWriter) Digest() digest.Digest {
|
||||
|
||||
func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
|
||||
// Check whether read has already thrown an error
|
||||
if _, err := pw.pipe.Write([]byte{}); err != nil && !errors.Is(err, io.ErrClosedPipe) {
|
||||
return fmt.Errorf("pipe error before commit: %w", err)
|
||||
if pw.pipe != nil {
|
||||
if _, err := pw.pipe.Write([]byte{}); err != nil && !errors.Is(err, io.ErrClosedPipe) {
|
||||
return fmt.Errorf("pipe error before commit: %w", err)
|
||||
}
|
||||
if err := pw.pipe.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := pw.pipe.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO: timeout waiting for response
|
||||
var resp *http.Response
|
||||
select {
|
||||
@@ -506,7 +512,7 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK, http.StatusCreated, http.StatusNoContent, http.StatusAccepted:
|
||||
default:
|
||||
return remoteserrors.NewUnexpectedStatusErr(resp)
|
||||
return unexpectedResponseErr(resp)
|
||||
}
|
||||
|
||||
status, err := pw.tracker.GetStatus(pw.ref)
|
||||
|
@@ -17,9 +17,11 @@
package docker

import (
	"crypto/tls"
	"errors"
	"net"
	"net/http"
	"time"
)

// HostCapabilities represent the capabilities of the registry
@@ -170,7 +172,9 @@ func ConfigureDefaultRegistries(ropts ...RegistryOpt) RegistryHosts {
		}

		if config.Client == nil {
			config.Client = http.DefaultClient
			config.Client = &http.Client{
				Transport: DefaultHTTPTransport(nil),
			}
		}

		if opts.plainHTTP != nil {
@@ -242,3 +246,19 @@ func MatchLocalhost(host string) (bool, error) {

	return ip.IsLoopback(), nil
}

func DefaultHTTPTransport(defaultTLSConfig *tls.Config) *http.Transport {
	return &http.Transport{
		Proxy: http.ProxyFromEnvironment,
		DialContext: (&net.Dialer{
			Timeout:       30 * time.Second,
			KeepAlive:     30 * time.Second,
			FallbackDelay: 300 * time.Millisecond,
		}).DialContext,
		MaxIdleConns:          10,
		IdleConnTimeout:       30 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		TLSClientConfig:       defaultTLSConfig,
		ExpectContinueTimeout: 5 * time.Second,
	}
}
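A sketch of reusing the new default transport when building a registry host by hand, for example to pin a minimum TLS version; the host values are placeholders.

package main

import (
	"crypto/tls"
	"net/http"

	"github.com/containerd/containerd/v2/core/remotes/docker"
)

func main() {
	// Reuse the library's dialer and timeouts but supply our own TLS policy.
	tlsConfig := &tls.Config{MinVersion: tls.VersionTLS12}

	host := docker.RegistryHost{
		Client:       &http.Client{Transport: docker.DefaultHTTPTransport(tlsConfig)},
		Host:         "registry.example.com",
		Scheme:       "https",
		Path:         "/v2",
		Capabilities: docker.HostCapabilityPull | docker.HostCapabilityResolve,
	}
	_ = host
}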
172 src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver.go generated vendored
@@ -34,11 +34,11 @@ import (
"github.com/containerd/log"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"golang.org/x/sync/semaphore"

"github.com/containerd/containerd/v2/core/images"
"github.com/containerd/containerd/v2/core/remotes"
"github.com/containerd/containerd/v2/core/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
remoteerrors "github.com/containerd/containerd/v2/core/remotes/errors"
"github.com/containerd/containerd/v2/core/transfer"
"github.com/containerd/containerd/v2/pkg/reference"
"github.com/containerd/containerd/v2/pkg/tracing"
"github.com/containerd/containerd/v2/version"
@@ -142,6 +142,7 @@ type dockerResolver struct {
header http.Header
resolveHeader http.Header
tracker StatusTracker
config transfer.ImageResolverOptions
}

// NewResolver returns a new resolver to a Docker registry
@@ -156,9 +157,6 @@ func NewResolver(options ResolverOptions) remotes.Resolver {
// make a copy of the headers to avoid race due to concurrent map write
options.Headers = options.Headers.Clone()
}
if _, ok := options.Headers["User-Agent"]; !ok {
options.Headers.Set("User-Agent", "containerd/"+version.Version)
}

resolveHeader := http.Header{}
if _, ok := options.Headers["Accept"]; !ok {
@@ -232,7 +230,7 @@ func (r *countingReader) Read(p []byte) (int, error) {
return n, err
}

var _ remotes.Resolver = &dockerResolver{}
var _ remotes.ResolverWithOptions = &dockerResolver{}

func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) {
base, err := r.resolveDockerBase(ref)
@@ -307,7 +305,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
}

log.G(ctx).Debug("resolving")
resp, err := req.doWithRetries(ctx, nil)
resp, err := req.doWithRetries(ctx, i == len(hosts)-1)
if err != nil {
if errors.Is(err, ErrInvalidAuthorization) {
err = fmt.Errorf("pull access denied, repository does not exist or may require authorization: %w", err)
@@ -332,13 +330,13 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
}
if resp.StatusCode > 399 {
if firstErrPriority < 3 {
firstErr = remoteerrors.NewUnexpectedStatusErr(resp)
firstErr = unexpectedResponseErr(resp)
firstErrPriority = 3
}
log.G(ctx).Infof("%s after status: %s", nextHostOrFail(i), resp.Status)
continue // try another host
}
return "", ocispec.Descriptor{}, remoteerrors.NewUnexpectedStatusErr(resp)
return "", ocispec.Descriptor{}, unexpectedResponseErr(resp)
}
size := resp.ContentLength
|
||||
contentType := getManifestMediaType(resp)
|
||||
@@ -371,7 +369,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
|
||||
req.header[key] = append(req.header[key], value...)
|
||||
}
|
||||
|
||||
resp, err := req.doWithRetries(ctx, nil)
|
||||
resp, err := req.doWithRetries(ctx, true)
|
||||
if err != nil {
|
||||
return "", ocispec.Descriptor{}, err
|
||||
}
|
||||
@@ -387,13 +385,8 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
|
||||
}
|
||||
|
||||
if contentType == images.MediaTypeDockerSchema1Manifest {
|
||||
b, err := schema1.ReadStripSignature(&bodyReader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dgst = digest.FromBytes(b)
|
||||
return nil
|
||||
return fmt.Errorf("%w: media type %q is no longer supported since containerd v2.0, please rebuild the image as %q or %q",
|
||||
errdefs.ErrNotImplemented, images.MediaTypeDockerSchema1Manifest, images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest)
|
||||
}
|
||||
|
||||
dgst, err = digest.FromReader(&bodyReader)
|
||||
@@ -433,6 +426,12 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
|
||||
return "", ocispec.Descriptor{}, firstErr
|
||||
}
|
||||
|
||||
func (r *dockerResolver) SetOptions(options ...transfer.ImageResolverOption) {
|
||||
for _, opt := range options {
|
||||
opt(&r.config)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
|
||||
base, err := r.resolveDockerBase(ref)
|
||||
if err != nil {
|
||||
@@ -467,10 +466,25 @@ func (r *dockerResolver) resolveDockerBase(ref string) (*dockerBase, error) {
|
||||
}
|
||||
|
||||
type dockerBase struct {
|
||||
refspec reference.Spec
|
||||
repository string
|
||||
hosts []RegistryHost
|
||||
header http.Header
|
||||
refspec reference.Spec
|
||||
repository string
|
||||
hosts []RegistryHost
|
||||
header http.Header
|
||||
performances transfer.ImageResolverPerformanceSettings
|
||||
limiter *semaphore.Weighted
|
||||
}
|
||||
|
||||
func (r *dockerBase) Acquire(ctx context.Context, weight int64) error {
|
||||
if r.limiter == nil {
|
||||
return nil
|
||||
}
|
||||
return r.limiter.Acquire(ctx, weight)
|
||||
}
|
||||
|
||||
func (r *dockerBase) Release(weight int64) {
|
||||
if r.limiter != nil {
|
||||
r.limiter.Release(weight)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) {
|
||||
@@ -480,10 +494,12 @@ func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) {
|
||||
return nil, err
|
||||
}
|
||||
return &dockerBase{
|
||||
refspec: refspec,
|
||||
repository: strings.TrimPrefix(refspec.Locator, host+"/"),
|
||||
hosts: hosts,
|
||||
header: r.header,
|
||||
refspec: refspec,
|
||||
repository: strings.TrimPrefix(refspec.Locator, host+"/"),
|
||||
hosts: hosts,
|
||||
header: r.header,
|
||||
performances: r.config.Performances,
|
||||
limiter: r.config.DownloadLimiter,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -505,6 +521,11 @@ func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *re
|
||||
for key, value := range host.Header {
|
||||
header[key] = append(header[key], value...)
|
||||
}
|
||||
|
||||
if len(header.Get("User-Agent")) == 0 {
|
||||
header.Set("User-Agent", "containerd/"+version.Version)
|
||||
}
|
||||
|
||||
parts := append([]string{"/", host.Path, r.repository}, ps...)
|
||||
p := path.Join(parts...)
|
||||
// Join strips trailing slash, re-add ending "/" if included
|
||||
@@ -562,6 +583,12 @@ type request struct {
|
||||
size int64
|
||||
}
|
||||
|
||||
func (r *request) clone() *request {
|
||||
res := *r
|
||||
res.header = r.header.Clone()
|
||||
return &res
|
||||
}
|
||||
|
||||
func (r *request) do(ctx context.Context) (*http.Response, error) {
|
||||
u := r.host.Scheme + "://" + r.host.Host + r.path
|
||||
req, err := http.NewRequestWithContext(ctx, r.method, u, nil)
|
||||
@@ -617,26 +644,91 @@ func (r *request) do(ctx context.Context) (*http.Response, error) {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (r *request) doWithRetries(ctx context.Context, responses []*http.Response) (*http.Response, error) {
|
||||
type doChecks func(r *request, resp *http.Response) error
|
||||
|
||||
func withErrorCheck(r *request, resp *http.Response) error {
|
||||
if resp.StatusCode > 299 {
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
return fmt.Errorf("content at %v not found: %w", r.String(), errdefs.ErrNotFound)
|
||||
}
|
||||
|
||||
return unexpectedResponseErr(resp)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var errContentRangeIgnored = errors.New("content range requests ignored")
|
||||
|
||||
func withOffsetCheck(offset int64) doChecks {
|
||||
return func(r *request, resp *http.Response) error {
|
||||
if offset == 0 {
|
||||
return nil
|
||||
}
|
||||
if resp.StatusCode == http.StatusPartialContent {
|
||||
return nil
|
||||
}
|
||||
if cr := resp.Header.Get("Content-Range"); cr != "" {
|
||||
if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) {
|
||||
return fmt.Errorf("unhandled content range in response: %v", cr)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Discard up to offset
|
||||
// Could use buffer pool here but this case should be rare
|
||||
n, err := io.Copy(io.Discard, io.LimitReader(resp.Body, offset))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to discard to offset: %w", err)
|
||||
}
|
||||
if n != offset {
|
||||
return errors.New("unable to discard to offset")
|
||||
}
|
||||
|
||||
// content range ignored, we can't do concurrent fetches here.
|
||||
// return an error to be caught
|
||||
return errContentRangeIgnored
|
||||
}
|
||||
}
|
||||
|
||||
func (r *request) doWithRetries(ctx context.Context, lastHost bool, checks ...doChecks) (resp *http.Response, err error) {
|
||||
resp, err = r.doWithRetriesInner(ctx, nil, lastHost)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil && err != errContentRangeIgnored {
|
||||
resp.Body.Close()
|
||||
}
|
||||
}()
|
||||
for _, check := range checks {
|
||||
if err := check(r, resp); err != nil {
|
||||
return resp, err
|
||||
}
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (r *request) doWithRetriesInner(ctx context.Context, responses []*http.Response, lastHost bool) (*http.Response, error) {
|
||||
resp, err := r.do(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
responses = append(responses, resp)
|
||||
retry, err := r.retryRequest(ctx, responses)
|
||||
retry, err := r.retryRequest(ctx, responses, lastHost)
|
||||
if err != nil {
|
||||
resp.Body.Close()
|
||||
return nil, err
|
||||
}
|
||||
if retry {
|
||||
resp.Body.Close()
|
||||
return r.doWithRetries(ctx, responses)
|
||||
return r.doWithRetriesInner(ctx, responses, lastHost)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (r *request) retryRequest(ctx context.Context, responses []*http.Response) (bool, error) {
|
||||
func (r *request) retryRequest(ctx context.Context, responses []*http.Response, lastHost bool) (bool, error) {
|
||||
if len(responses) > 5 {
|
||||
return false, nil
|
||||
}
|
||||
@@ -662,9 +754,17 @@ func (r *request) retryRequest(ctx context.Context, responses []*http.Response)
|
||||
}
|
||||
case http.StatusRequestTimeout, http.StatusTooManyRequests:
|
||||
return true, nil
|
||||
case http.StatusServiceUnavailable, http.StatusGatewayTimeout, http.StatusInternalServerError:
|
||||
// Do not retry if the same error was seen in the last request
|
||||
if len(responses) > 1 && responses[len(responses)-2].StatusCode == last.StatusCode {
|
||||
return false, nil
|
||||
}
|
||||
// Only retry if this is the last host that will be attempted
|
||||
if lastHost {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Handle 50x errors accounting for attempt history
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@@ -672,6 +772,18 @@ func (r *request) String() string {
|
||||
return r.host.Scheme + "://" + r.host.Host + r.path
|
||||
}
|
||||
|
||||
func (r *request) setMediaType(mediatype string) {
|
||||
if mediatype == "" {
|
||||
r.header.Set("Accept", "*/*")
|
||||
} else {
|
||||
r.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", "))
|
||||
}
|
||||
}
|
||||
|
||||
func (r *request) setOffset(offset int64) {
|
||||
r.header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
|
||||
}
|
||||
|
||||
func requestFields(req *http.Request) log.Fields {
|
||||
fields := map[string]interface{}{
|
||||
"request.method": req.Method,
|
||||
|
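Reviewer note: in the resolver.go changes above, doWithRetries now takes a lastHost flag plus optional doChecks, so transient 5xx responses are only retried on the final registry host and offset/Content-Range handling becomes a pluggable check. An illustrative, package-internal sketch (not part of the commit) of how a fetch path could combine these helpers:

// Illustrative only: withErrorCheck maps 404 to errdefs.ErrNotFound and other >299
// codes to unexpectedResponseErr; withOffsetCheck accepts 206/Content-Range responses
// or discards up to offset and reports errContentRangeIgnored.
func fetchAt(ctx context.Context, r *request, offset int64, lastHost bool) (*http.Response, error) {
	resp, err := r.doWithRetries(ctx, lastHost, withErrorCheck, withOffsetCheck(offset))
	if errors.Is(err, errContentRangeIgnored) {
		// The server ignored the Range header; the check already advanced the body
		// to offset, so the caller can keep reading from resp.Body.
		return resp, nil
	}
	return resp, err
}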
@@ -1,626 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package schema1 provides a converter to fetch an image formatted in Docker Image Manifest v2, Schema 1.
|
||||
//
|
||||
// Deprecated: use images formatted in Docker Image Manifest v2, Schema 2, or OCI Image Spec v1.
|
||||
package schema1
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/core/remotes"
|
||||
"github.com/containerd/containerd/v2/pkg/archive/compression"
|
||||
"github.com/containerd/containerd/v2/pkg/deprecation"
|
||||
"github.com/containerd/containerd/v2/pkg/labels"
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/containerd/log"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
specs "github.com/opencontainers/image-spec/specs-go"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const (
|
||||
manifestSizeLimit = 8e6 // 8MB
|
||||
labelDockerSchema1EmptyLayer = "containerd.io/docker.schema1.empty-layer"
|
||||
)
|
||||
|
||||
type blobState struct {
|
||||
diffID digest.Digest
|
||||
empty bool
|
||||
}
|
||||
|
||||
// Converter converts schema1 manifests to schema2 on fetch
|
||||
type Converter struct {
|
||||
contentStore content.Store
|
||||
fetcher remotes.Fetcher
|
||||
|
||||
pulledManifest *manifest
|
||||
|
||||
mu sync.Mutex
|
||||
blobMap map[digest.Digest]blobState
|
||||
layerBlobs map[digest.Digest]ocispec.Descriptor
|
||||
}
|
||||
|
||||
var ErrDisabled = fmt.Errorf("Pulling Schema 1 images have been deprecated and disabled by default since containerd v2.0. "+
|
||||
"As a workaround you may set an environment variable `%s=1`, but this will be completely removed in containerd v2.1.",
|
||||
deprecation.EnvPullSchema1Image)
|
||||
|
||||
// NewConverter returns a new converter
|
||||
func NewConverter(contentStore content.Store, fetcher remotes.Fetcher) (*Converter, error) {
|
||||
s := os.Getenv(deprecation.EnvPullSchema1Image)
|
||||
if s == "" {
|
||||
return nil, ErrDisabled
|
||||
}
|
||||
enable, err := strconv.ParseBool(s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse `%s=%s`: %w", deprecation.EnvPullSchema1Image, s, err)
|
||||
}
|
||||
if !enable {
|
||||
return nil, ErrDisabled
|
||||
}
|
||||
log.L.Warn(ErrDisabled)
|
||||
return &Converter{
|
||||
contentStore: contentStore,
|
||||
fetcher: fetcher,
|
||||
blobMap: map[digest.Digest]blobState{},
|
||||
layerBlobs: map[digest.Digest]ocispec.Descriptor{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Handle fetching descriptors for a docker media type
|
||||
func (c *Converter) Handle(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
switch desc.MediaType {
|
||||
case images.MediaTypeDockerSchema1Manifest:
|
||||
if err := c.fetchManifest(ctx, desc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
m := c.pulledManifest
|
||||
if len(m.FSLayers) != len(m.History) {
|
||||
return nil, errors.New("invalid schema 1 manifest, history and layer mismatch")
|
||||
}
|
||||
descs := make([]ocispec.Descriptor, 0, len(c.pulledManifest.FSLayers))
|
||||
|
||||
for i := range m.FSLayers {
|
||||
if _, ok := c.blobMap[c.pulledManifest.FSLayers[i].BlobSum]; !ok {
|
||||
empty, err := isEmptyLayer([]byte(m.History[i].V1Compatibility))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Do no attempt to download a known empty blob
|
||||
if !empty {
|
||||
descs = append([]ocispec.Descriptor{
|
||||
{
|
||||
MediaType: images.MediaTypeDockerSchema2LayerGzip,
|
||||
Digest: c.pulledManifest.FSLayers[i].BlobSum,
|
||||
Size: -1,
|
||||
},
|
||||
}, descs...)
|
||||
}
|
||||
c.blobMap[c.pulledManifest.FSLayers[i].BlobSum] = blobState{
|
||||
empty: empty,
|
||||
}
|
||||
}
|
||||
}
|
||||
return descs, nil
|
||||
case images.MediaTypeDockerSchema2LayerGzip:
|
||||
if c.pulledManifest == nil {
|
||||
return nil, errors.New("manifest required for schema 1 blob pull")
|
||||
}
|
||||
return nil, c.fetchBlob(ctx, desc)
|
||||
default:
|
||||
return nil, fmt.Errorf("%v not support for schema 1 manifests", desc.MediaType)
|
||||
}
|
||||
}
|
||||
|
||||
// ConvertOptions provides options on converting a docker schema1 manifest.
|
||||
type ConvertOptions struct {
|
||||
// ManifestMediaType specifies the media type of the manifest OCI descriptor.
|
||||
ManifestMediaType string
|
||||
|
||||
// ConfigMediaType specifies the media type of the manifest config OCI
|
||||
// descriptor.
|
||||
ConfigMediaType string
|
||||
}
|
||||
|
||||
// ConvertOpt allows configuring a convert operation.
|
||||
type ConvertOpt func(context.Context, *ConvertOptions) error
|
||||
|
||||
// UseDockerSchema2 is used to indicate that a schema1 manifest should be
|
||||
// converted into the media types for a docker schema2 manifest.
|
||||
func UseDockerSchema2() ConvertOpt {
|
||||
return func(ctx context.Context, o *ConvertOptions) error {
|
||||
o.ManifestMediaType = images.MediaTypeDockerSchema2Manifest
|
||||
o.ConfigMediaType = images.MediaTypeDockerSchema2Config
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Convert a docker manifest to an OCI descriptor
|
||||
func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.Descriptor, error) {
|
||||
co := ConvertOptions{
|
||||
ManifestMediaType: ocispec.MediaTypeImageManifest,
|
||||
ConfigMediaType: ocispec.MediaTypeImageConfig,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
if err := opt(ctx, &co); err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
}
|
||||
|
||||
history, diffIDs, err := c.schema1ManifestHistory()
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, fmt.Errorf("schema 1 conversion failed: %w", err)
|
||||
}
|
||||
|
||||
var img ocispec.Image
|
||||
if err := json.Unmarshal([]byte(c.pulledManifest.History[0].V1Compatibility), &img); err != nil {
|
||||
return ocispec.Descriptor{}, fmt.Errorf("failed to unmarshal image from schema 1 history: %w", err)
|
||||
}
|
||||
|
||||
img.History = history
|
||||
img.RootFS = ocispec.RootFS{
|
||||
Type: "layers",
|
||||
DiffIDs: diffIDs,
|
||||
}
|
||||
|
||||
b, err := json.MarshalIndent(img, "", " ")
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, fmt.Errorf("failed to marshal image: %w", err)
|
||||
}
|
||||
|
||||
config := ocispec.Descriptor{
|
||||
MediaType: co.ConfigMediaType,
|
||||
Digest: digest.Canonical.FromBytes(b),
|
||||
Size: int64(len(b)),
|
||||
}
|
||||
|
||||
layers := make([]ocispec.Descriptor, len(diffIDs))
|
||||
for i, diffID := range diffIDs {
|
||||
layers[i] = c.layerBlobs[diffID]
|
||||
}
|
||||
|
||||
manifest := ocispec.Manifest{
|
||||
Versioned: specs.Versioned{
|
||||
SchemaVersion: 2,
|
||||
},
|
||||
Config: config,
|
||||
Layers: layers,
|
||||
}
|
||||
|
||||
mb, err := json.MarshalIndent(manifest, "", " ")
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, fmt.Errorf("failed to marshal image: %w", err)
|
||||
}
|
||||
|
||||
desc := ocispec.Descriptor{
|
||||
MediaType: co.ManifestMediaType,
|
||||
Digest: digest.Canonical.FromBytes(mb),
|
||||
Size: int64(len(mb)),
|
||||
}
|
||||
|
||||
labels := map[string]string{}
|
||||
labels["containerd.io/gc.ref.content.0"] = manifest.Config.Digest.String()
|
||||
for i, ch := range manifest.Layers {
|
||||
labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = ch.Digest.String()
|
||||
}
|
||||
|
||||
ref := remotes.MakeRefKey(ctx, desc)
|
||||
if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(mb), desc, content.WithLabels(labels)); err != nil {
|
||||
return ocispec.Descriptor{}, fmt.Errorf("failed to write image manifest: %w", err)
|
||||
}
|
||||
|
||||
ref = remotes.MakeRefKey(ctx, config)
|
||||
if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(b), config); err != nil {
|
||||
return ocispec.Descriptor{}, fmt.Errorf("failed to write image config: %w", err)
|
||||
}
|
||||
|
||||
return desc, nil
|
||||
}
|
||||
|
||||
// ReadStripSignature reads in a schema1 manifest and returns a byte array
|
||||
// with the "signatures" field stripped
|
||||
func ReadStripSignature(schema1Blob io.Reader) ([]byte, error) {
|
||||
b, err := io.ReadAll(io.LimitReader(schema1Blob, manifestSizeLimit)) // limit to 8MB
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return stripSignature(b)
|
||||
}
|
||||
|
||||
func (c *Converter) fetchManifest(ctx context.Context, desc ocispec.Descriptor) error {
|
||||
log.G(ctx).Debug("fetch schema 1")
|
||||
|
||||
rc, err := c.fetcher.Fetch(ctx, desc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b, err := ReadStripSignature(rc)
|
||||
rc.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var m manifest
|
||||
if err := json.Unmarshal(b, &m); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(m.Manifests) != 0 || len(m.Layers) != 0 {
|
||||
return errors.New("converter: expected schema1 document but found extra keys")
|
||||
}
|
||||
c.pulledManifest = &m
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) error {
|
||||
log.G(ctx).Debug("fetch blob")
|
||||
|
||||
var (
|
||||
ref = remotes.MakeRefKey(ctx, desc)
|
||||
calc = newBlobStateCalculator()
|
||||
compressMethod = compression.Gzip
|
||||
)
|
||||
|
||||
// size may be unknown, set to zero for content ingest
|
||||
ingestDesc := desc
|
||||
if ingestDesc.Size == -1 {
|
||||
ingestDesc.Size = 0
|
||||
}
|
||||
|
||||
cw, err := content.OpenWriter(ctx, c.contentStore, content.WithRef(ref), content.WithDescriptor(ingestDesc))
|
||||
if err != nil {
|
||||
if !errdefs.IsAlreadyExists(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
reuse, err := c.reuseLabelBlobState(ctx, desc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if reuse {
|
||||
return nil
|
||||
}
|
||||
|
||||
ra, err := c.contentStore.ReaderAt(ctx, desc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer ra.Close()
|
||||
|
||||
r, err := compression.DecompressStream(content.NewReader(ra))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
compressMethod = r.GetCompression()
|
||||
_, err = io.Copy(calc, r)
|
||||
r.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
defer cw.Close()
|
||||
|
||||
rc, err := c.fetcher.Fetch(ctx, desc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rc.Close()
|
||||
|
||||
eg, _ := errgroup.WithContext(ctx)
|
||||
pr, pw := io.Pipe()
|
||||
|
||||
eg.Go(func() error {
|
||||
r, err := compression.DecompressStream(pr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
compressMethod = r.GetCompression()
|
||||
_, err = io.Copy(calc, r)
|
||||
r.Close()
|
||||
pr.CloseWithError(err)
|
||||
return err
|
||||
})
|
||||
|
||||
eg.Go(func() error {
|
||||
defer pw.Close()
|
||||
|
||||
return content.Copy(ctx, cw, io.TeeReader(rc, pw), ingestDesc.Size, ingestDesc.Digest)
|
||||
})
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if desc.Size == -1 {
|
||||
info, err := c.contentStore.Info(ctx, desc.Digest)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get blob info: %w", err)
|
||||
}
|
||||
desc.Size = info.Size
|
||||
}
|
||||
|
||||
if compressMethod == compression.Uncompressed {
|
||||
log.G(ctx).WithField("id", desc.Digest).Debugf("changed media type for uncompressed schema1 layer blob")
|
||||
desc.MediaType = images.MediaTypeDockerSchema2Layer
|
||||
}
|
||||
|
||||
state := calc.State()
|
||||
|
||||
cinfo := content.Info{
|
||||
Digest: desc.Digest,
|
||||
Labels: map[string]string{
|
||||
labels.LabelUncompressed: state.diffID.String(),
|
||||
labelDockerSchema1EmptyLayer: strconv.FormatBool(state.empty),
|
||||
},
|
||||
}
|
||||
|
||||
if _, err := c.contentStore.Update(ctx, cinfo, "labels."+labels.LabelUncompressed, fmt.Sprintf("labels.%s", labelDockerSchema1EmptyLayer)); err != nil {
|
||||
return fmt.Errorf("failed to update uncompressed label: %w", err)
|
||||
}
|
||||
|
||||
c.mu.Lock()
|
||||
c.blobMap[desc.Digest] = state
|
||||
c.layerBlobs[state.diffID] = desc
|
||||
c.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descriptor) (bool, error) {
|
||||
cinfo, err := c.contentStore.Info(ctx, desc.Digest)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to get blob info: %w", err)
|
||||
}
|
||||
desc.Size = cinfo.Size
|
||||
|
||||
diffID, ok := cinfo.Labels[labels.LabelUncompressed]
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
emptyVal, ok := cinfo.Labels[labelDockerSchema1EmptyLayer]
|
||||
if !ok {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
isEmpty, err := strconv.ParseBool(emptyVal)
|
||||
if err != nil {
|
||||
log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse bool from label %s: %v", labelDockerSchema1EmptyLayer, isEmpty)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
bState := blobState{empty: isEmpty}
|
||||
|
||||
if bState.diffID, err = digest.Parse(diffID); err != nil {
|
||||
log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse digest from label %s: %v", labels.LabelUncompressed, diffID)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// NOTE: there is no need to read header to get compression method
|
||||
// because there are only two kinds of methods.
|
||||
if bState.diffID == desc.Digest {
|
||||
desc.MediaType = images.MediaTypeDockerSchema2Layer
|
||||
} else {
|
||||
desc.MediaType = images.MediaTypeDockerSchema2LayerGzip
|
||||
}
|
||||
|
||||
c.mu.Lock()
|
||||
c.blobMap[desc.Digest] = bState
|
||||
c.layerBlobs[bState.diffID] = desc
|
||||
c.mu.Unlock()
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (c *Converter) schema1ManifestHistory() ([]ocispec.History, []digest.Digest, error) {
|
||||
if c.pulledManifest == nil {
|
||||
return nil, nil, errors.New("missing schema 1 manifest for conversion")
|
||||
}
|
||||
m := *c.pulledManifest
|
||||
|
||||
if len(m.History) == 0 {
|
||||
return nil, nil, errors.New("no history")
|
||||
}
|
||||
|
||||
history := make([]ocispec.History, len(m.History))
|
||||
diffIDs := []digest.Digest{}
|
||||
for i := range m.History {
|
||||
var h v1History
|
||||
if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to unmarshal history: %w", err)
|
||||
}
|
||||
|
||||
blobSum := m.FSLayers[i].BlobSum
|
||||
|
||||
state := c.blobMap[blobSum]
|
||||
|
||||
history[len(history)-i-1] = ocispec.History{
|
||||
Author: h.Author,
|
||||
Comment: h.Comment,
|
||||
Created: &h.Created,
|
||||
CreatedBy: strings.Join(h.ContainerConfig.Cmd, " "),
|
||||
EmptyLayer: state.empty,
|
||||
}
|
||||
|
||||
if !state.empty {
|
||||
diffIDs = append([]digest.Digest{state.diffID}, diffIDs...)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
return history, diffIDs, nil
|
||||
}
|
||||
|
||||
type fsLayer struct {
|
||||
BlobSum digest.Digest `json:"blobSum"`
|
||||
}
|
||||
|
||||
type history struct {
|
||||
V1Compatibility string `json:"v1Compatibility"`
|
||||
}
|
||||
|
||||
type manifest struct {
|
||||
FSLayers []fsLayer `json:"fsLayers"`
|
||||
History []history `json:"history"`
|
||||
Layers json.RawMessage `json:"layers,omitempty"` // OCI manifest
|
||||
Manifests json.RawMessage `json:"manifests,omitempty"` // OCI index
|
||||
}
|
||||
|
||||
type v1History struct {
|
||||
Author string `json:"author,omitempty"`
|
||||
Created time.Time `json:"created"`
|
||||
Comment string `json:"comment,omitempty"`
|
||||
ThrowAway *bool `json:"throwaway,omitempty"`
|
||||
Size *int `json:"Size,omitempty"` // used before ThrowAway field
|
||||
ContainerConfig struct {
|
||||
Cmd []string `json:"Cmd,omitempty"`
|
||||
} `json:"container_config,omitempty"`
|
||||
}
|
||||
|
||||
// isEmptyLayer returns whether the v1 compatibility history describes an
|
||||
// empty layer. A return value of true indicates the layer is empty,
|
||||
// however false does not indicate non-empty.
|
||||
func isEmptyLayer(compatHistory []byte) (bool, error) {
|
||||
var h v1History
|
||||
if err := json.Unmarshal(compatHistory, &h); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if h.ThrowAway != nil {
|
||||
return *h.ThrowAway, nil
|
||||
}
|
||||
if h.Size != nil {
|
||||
return *h.Size == 0, nil
|
||||
}
|
||||
|
||||
// If no `Size` or `throwaway` field is given, then
|
||||
// it cannot be determined whether the layer is empty
|
||||
// from the history, return false
|
||||
return false, nil
|
||||
}
|
||||
|
||||
type signature struct {
|
||||
Signatures []jsParsedSignature `json:"signatures"`
|
||||
}
|
||||
|
||||
type jsParsedSignature struct {
|
||||
Protected string `json:"protected"`
|
||||
}
|
||||
|
||||
type protectedBlock struct {
|
||||
Length int `json:"formatLength"`
|
||||
Tail string `json:"formatTail"`
|
||||
}
|
||||
|
||||
// joseBase64UrlDecode decodes the given string using the standard base64 url
|
||||
// decoder but first adds the appropriate number of trailing '=' characters in
|
||||
// accordance with the jose specification.
|
||||
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
|
||||
func joseBase64UrlDecode(s string) ([]byte, error) {
|
||||
switch len(s) % 4 {
|
||||
case 0:
|
||||
case 2:
|
||||
s += "=="
|
||||
case 3:
|
||||
s += "="
|
||||
default:
|
||||
return nil, errors.New("illegal base64url string")
|
||||
}
|
||||
return base64.URLEncoding.DecodeString(s)
|
||||
}
|
||||
|
||||
func stripSignature(b []byte) ([]byte, error) {
|
||||
var sig signature
|
||||
if err := json.Unmarshal(b, &sig); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(sig.Signatures) == 0 {
|
||||
return nil, errors.New("no signatures")
|
||||
}
|
||||
pb, err := joseBase64UrlDecode(sig.Signatures[0].Protected)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not decode %s: %w", sig.Signatures[0].Protected, err)
|
||||
}
|
||||
|
||||
var protected protectedBlock
|
||||
if err := json.Unmarshal(pb, &protected); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if protected.Length > len(b) {
|
||||
return nil, errors.New("invalid protected length block")
|
||||
}
|
||||
|
||||
tail, err := joseBase64UrlDecode(protected.Tail)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid tail base 64 value: %w", err)
|
||||
}
|
||||
|
||||
return append(b[:protected.Length], tail...), nil
|
||||
}
|
||||
|
||||
type blobStateCalculator struct {
|
||||
empty bool
|
||||
digester digest.Digester
|
||||
}
|
||||
|
||||
func newBlobStateCalculator() *blobStateCalculator {
|
||||
return &blobStateCalculator{
|
||||
empty: true,
|
||||
digester: digest.Canonical.Digester(),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *blobStateCalculator) Write(p []byte) (int, error) {
|
||||
if c.empty {
|
||||
for _, b := range p {
|
||||
if b != 0x00 {
|
||||
c.empty = false
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return c.digester.Hash().Write(p)
|
||||
}
|
||||
|
||||
func (c *blobStateCalculator) State() blobState {
|
||||
return blobState{
|
||||
empty: c.empty,
|
||||
diffID: c.digester.Digest(),
|
||||
}
|
||||
}
|
@@ -88,7 +88,7 @@ func GetTokenScopes(ctx context.Context, common []string) []string {

l := 0
for idx := 1; idx < len(scopes); idx++ {
// Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/)
// Note: this comparison is unaware of the scope grammar (https://distribution.github.io/distribution/spec/auth/scope/)
// So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal.
if scopes[l] == scopes[idx] {
continue
@@ -20,16 +20,23 @@ import (
"fmt"
"io"
"net/http"

"github.com/containerd/typeurl/v2"
)

var _ error = ErrUnexpectedStatus{}

func init() {
typeurl.Register(&ErrUnexpectedStatus{}, "github.com/containerd/containerd/v2/core/remotes/errors", "ErrUnexpectedStatus")
}

// ErrUnexpectedStatus is returned if a registry API request returned with unexpected HTTP status
type ErrUnexpectedStatus struct {
Status string
StatusCode int
Body []byte
RequestURL, RequestMethod string
Status string `json:"status"`
StatusCode int `json:"statusCode"`
Body []byte `json:"body,omitempty"`
RequestURL string `json:"requestURL,omitempty"`
RequestMethod string `json:"requestMethod,omitempty"`
}

func (e ErrUnexpectedStatus) Error() string {
2 src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/handlers.go generated vendored
@@ -80,6 +80,8 @@ func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string {
return "layer-" + key
case images.IsKnownConfig(desc.MediaType):
return "config-" + key
case images.IsAttestationType(desc.MediaType):
return "attestation-" + key
default:
log.G(ctx).Warnf("reference for unknown type: %s", desc.MediaType)
return "unknown-" + key
7 src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/resolver.go generated vendored
@@ -21,6 +21,7 @@ import (
"io"

"github.com/containerd/containerd/v2/core/content"
"github.com/containerd/containerd/v2/core/transfer"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
@@ -51,6 +52,12 @@ type Resolver interface {
Pusher(ctx context.Context, ref string) (Pusher, error)
}

// ResolverWithOptions is a Resolver that also supports setting options.
type ResolverWithOptions interface {
Resolver
SetOptions(options ...transfer.ImageResolverOption)
}

// Fetcher fetches content.
// A fetcher implementation may implement the FetcherByDigest interface too.
type Fetcher interface {
178 src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/transfer/transfer.go generated vendored Normal file
@@ -0,0 +1,178 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package transfer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"golang.org/x/sync/semaphore"
|
||||
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
)
|
||||
|
||||
type Transferrer interface {
|
||||
Transfer(ctx context.Context, source interface{}, destination interface{}, opts ...Opt) error
|
||||
}
|
||||
|
||||
type ImageResolver interface {
|
||||
Resolve(ctx context.Context) (name string, desc ocispec.Descriptor, err error)
|
||||
}
|
||||
|
||||
type ImageResolverOptionSetter interface {
|
||||
ImageResolver
|
||||
SetResolverOptions(opts ...ImageResolverOption)
|
||||
}
|
||||
|
||||
type ImageResolverOption func(*ImageResolverOptions)
|
||||
|
||||
type ImageResolverOptions struct {
|
||||
DownloadLimiter *semaphore.Weighted
|
||||
Performances ImageResolverPerformanceSettings
|
||||
}
|
||||
|
||||
type ImageResolverPerformanceSettings struct {
|
||||
MaxConcurrentDownloads int
|
||||
ConcurrentLayerFetchBuffer int
|
||||
}
|
||||
|
||||
func WithDownloadLimiter(limiter *semaphore.Weighted) ImageResolverOption {
|
||||
return func(opts *ImageResolverOptions) {
|
||||
opts.DownloadLimiter = limiter
|
||||
}
|
||||
}
|
||||
|
||||
func WithMaxConcurrentDownloads(maxConcurrentDownloads int) ImageResolverOption {
|
||||
return func(opts *ImageResolverOptions) {
|
||||
opts.Performances.MaxConcurrentDownloads = maxConcurrentDownloads
|
||||
}
|
||||
}
|
||||
|
||||
func WithConcurrentLayerFetchBuffer(ConcurrentLayerFetchBuffer int) ImageResolverOption {
|
||||
return func(opts *ImageResolverOptions) {
|
||||
opts.Performances.ConcurrentLayerFetchBuffer = ConcurrentLayerFetchBuffer
|
||||
}
|
||||
}
|
||||
|
||||
type ImageFetcher interface {
|
||||
ImageResolver
|
||||
|
||||
Fetcher(ctx context.Context, ref string) (Fetcher, error)
|
||||
}
|
||||
|
||||
type ImagePusher interface {
|
||||
Pusher(context.Context, ocispec.Descriptor) (Pusher, error)
|
||||
}
|
||||
|
||||
type Fetcher interface {
|
||||
Fetch(context.Context, ocispec.Descriptor) (io.ReadCloser, error)
|
||||
}
|
||||
|
||||
type Pusher interface {
|
||||
Push(context.Context, ocispec.Descriptor) (content.Writer, error)
|
||||
}
|
||||
|
||||
// ImageFilterer is used to filter out child objects of an image
|
||||
type ImageFilterer interface {
|
||||
ImageFilter(images.HandlerFunc, content.Store) images.HandlerFunc
|
||||
}
|
||||
|
||||
// ImageStorer is a type which is capable of storing images for
|
||||
// the provided descriptor. The descriptor may be any type of manifest
|
||||
// including an index with multiple image references.
|
||||
type ImageStorer interface {
|
||||
Store(context.Context, ocispec.Descriptor, images.Store) ([]images.Image, error)
|
||||
}
|
||||
|
||||
// ImageGetter is type which returns an image from an image store
|
||||
type ImageGetter interface {
|
||||
Get(context.Context, images.Store) (images.Image, error)
|
||||
}
|
||||
|
||||
// ImageLookup is a type which returns images from an image store
|
||||
// based on names or prefixes
|
||||
type ImageLookup interface {
|
||||
Lookup(context.Context, images.Store) ([]images.Image, error)
|
||||
}
|
||||
|
||||
// ImageExporter exports images to a writer
|
||||
type ImageExporter interface {
|
||||
Export(context.Context, content.Store, []images.Image) error
|
||||
}
|
||||
|
||||
// ImageImporter imports an image into a content store
|
||||
type ImageImporter interface {
|
||||
Import(context.Context, content.Store) (ocispec.Descriptor, error)
|
||||
}
|
||||
|
||||
// ImageImportStreamer returns an import streamer based on OCI or
|
||||
// Docker image tar archives. The stream should be a raw tar stream
|
||||
// and without compression.
|
||||
type ImageImportStreamer interface {
|
||||
ImportStream(context.Context) (io.Reader, string, error)
|
||||
}
|
||||
|
||||
type ImageExportStreamer interface {
|
||||
ExportStream(context.Context) (io.WriteCloser, string, error)
|
||||
}
|
||||
|
||||
type ImageUnpacker interface {
|
||||
UnpackPlatforms() []UnpackConfiguration
|
||||
}
|
||||
|
||||
// ImagePlatformsGetter is type which returns configured platforms.
|
||||
type ImagePlatformsGetter interface {
|
||||
Platforms() []ocispec.Platform
|
||||
}
|
||||
|
||||
// UnpackConfiguration specifies the platform and snapshotter to use for resolving
|
||||
// the unpack Platform, if snapshotter is not specified the platform default will
|
||||
// be used.
|
||||
type UnpackConfiguration struct {
|
||||
Platform ocispec.Platform
|
||||
Snapshotter string
|
||||
}
|
||||
|
||||
type ProgressFunc func(Progress)
|
||||
|
||||
type Config struct {
|
||||
Progress ProgressFunc
|
||||
}
|
||||
|
||||
type Opt func(*Config)
|
||||
|
||||
func WithProgress(f ProgressFunc) Opt {
|
||||
return func(opts *Config) {
|
||||
opts.Progress = f
|
||||
}
|
||||
}
|
||||
|
||||
// Progress is used to represent a particular progress event or incremental
|
||||
// update for the provided named object. The parents represent the names of
|
||||
// the objects which initiated the progress for the provided named object.
|
||||
// The name and what object it represents is determined by the implementation.
|
||||
type Progress struct {
|
||||
Event string
|
||||
Name string
|
||||
Parents []string
|
||||
Progress int64
|
||||
Total int64
|
||||
Desc *ocispec.Descriptor // since containerd v2.0
|
||||
}
|
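Reviewer note: the new transfer package above carries the resolver tuning options (download limiter, concurrency settings) that dockerResolver.SetOptions consumes. A hedged sketch (not part of the commit) of applying them through the ResolverWithOptions interface; the weight of 3 is an arbitrary example value:

package main

import (
	"golang.org/x/sync/semaphore"

	"github.com/containerd/containerd/v2/core/remotes"
	"github.com/containerd/containerd/v2/core/remotes/docker"
	"github.com/containerd/containerd/v2/core/transfer"
)

func configureResolver() remotes.Resolver {
	r := docker.NewResolver(docker.ResolverOptions{})
	// Sketch only: the docker resolver now implements remotes.ResolverWithOptions,
	// so transfer options can be applied after construction.
	if ro, ok := r.(remotes.ResolverWithOptions); ok {
		ro.SetOptions(
			transfer.WithDownloadLimiter(semaphore.NewWeighted(3)), // example weight
			transfer.WithMaxConcurrentDownloads(3),
		)
	}
	return r
}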
15 src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_darwin.go generated vendored
@@ -17,21 +17,6 @@
|
||||
package defaults
|
||||
|
||||
const (
|
||||
// DefaultRootDir is the default location used by containerd to store
|
||||
// persistent data
|
||||
DefaultRootDir = "/var/lib/containerd"
|
||||
// DefaultStateDir is the default location used by containerd to store
|
||||
// transient data
|
||||
DefaultStateDir = "/var/run/containerd"
|
||||
// DefaultAddress is the default unix socket address
|
||||
DefaultAddress = "/var/run/containerd/containerd.sock"
|
||||
// DefaultDebugAddress is the default unix socket address for pprof data
|
||||
DefaultDebugAddress = "/var/run/containerd/debug.sock"
|
||||
// DefaultFIFODir is the default location used by client-side cio library
|
||||
// to store FIFOs.
|
||||
DefaultFIFODir = "/var/run/containerd/fifo"
|
||||
// DefaultRuntime would be a multiple of choices, thus empty
|
||||
DefaultRuntime = ""
|
||||
// DefaultConfigDir is the default location for config files.
|
||||
DefaultConfigDir = "/etc/containerd"
|
||||
)
|
||||
|
@@ -1,23 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package defaults
|
||||
|
||||
const (
|
||||
// DefaultDiffer will set the default differ for the platform.
|
||||
// This differ should be compatible with the windows snapshotter.
|
||||
DefaultDiffer = "windows"
|
||||
)
|
16 src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_linux.go generated vendored
@@ -17,6 +17,22 @@
|
||||
package defaults
|
||||
|
||||
const (
|
||||
// DefaultAddress is the default unix socket address
|
||||
DefaultAddress = "/run/containerd/containerd.sock"
|
||||
// DefaultDebugAddress is the default unix socket address for pprof data
|
||||
DefaultDebugAddress = "/run/containerd/debug.sock"
|
||||
// DefaultFIFODir is the default location used by client-side cio library
|
||||
// to store FIFOs.
|
||||
DefaultFIFODir = "/run/containerd/fifo"
|
||||
// DefaultRuntime is the default linux runtime
|
||||
DefaultRuntime = "io.containerd.runc.v2"
|
||||
// DefaultSnapshotter will set the default snapshotter for the platform.
|
||||
// This will be based on the client compilation target, so take that into
|
||||
// account when choosing this value.
|
||||
DefaultSnapshotter = "overlayfs"
|
||||
// DefaultStateDir is the default location used by containerd to store
|
||||
// transient data
|
||||
DefaultStateDir = "/run/containerd"
|
||||
// DefaultDiffer will set the default differ for the platform.
|
||||
DefaultDiffer = "walking"
|
||||
)
|
||||
|
@@ -1,24 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package defaults
|
||||
|
||||
const (
|
||||
// DefaultSnapshotter will set the default snapshotter for the platform.
|
||||
// This will be based on the client compilation target, so take that into
|
||||
// account when choosing this value.
|
||||
DefaultSnapshotter = "overlayfs"
|
||||
)
|
@@ -1,24 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package defaults
|
||||
|
||||
const (
|
||||
// DefaultSnapshotter will set the default snapshotter for the platform.
|
||||
// This will be based on the client compilation target, so take that into
|
||||
// account when choosing this value.
|
||||
DefaultSnapshotter = "windows"
|
||||
)
|
16 src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_unix.go generated vendored
@@ -1,4 +1,4 @@
|
||||
//go:build !windows && !darwin
|
||||
//go:build unix
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
@@ -19,19 +19,9 @@
|
||||
package defaults
|
||||
|
||||
const (
|
||||
// DefaultConfigDir is the default location for config files.
|
||||
DefaultConfigDir = "/etc/containerd"
|
||||
// DefaultRootDir is the default location used by containerd to store
|
||||
// persistent data
|
||||
DefaultRootDir = "/var/lib/containerd"
|
||||
// DefaultStateDir is the default location used by containerd to store
|
||||
// transient data
|
||||
DefaultStateDir = "/run/containerd"
|
||||
// DefaultAddress is the default unix socket address
|
||||
DefaultAddress = "/run/containerd/containerd.sock"
|
||||
// DefaultDebugAddress is the default unix socket address for pprof data
|
||||
DefaultDebugAddress = "/run/containerd/debug.sock"
|
||||
// DefaultFIFODir is the default location used by client-side cio library
|
||||
// to store FIFOs.
|
||||
DefaultFIFODir = "/run/containerd/fifo"
|
||||
// DefaultConfigDir is the default location for config files.
|
||||
DefaultConfigDir = "/etc/containerd"
|
||||
)
|
||||
|
@@ -1,4 +1,4 @@
|
||||
//go:build darwin || freebsd || solaris
|
||||
//go:build unix && !linux
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
@@ -19,8 +19,20 @@
|
||||
package defaults
|
||||
|
||||
const (
|
||||
// DefaultAddress is the default unix socket address
|
||||
DefaultAddress = "/var/run/containerd/containerd.sock"
|
||||
// DefaultDebugAddress is the default unix socket address for pprof data
|
||||
DefaultDebugAddress = "/var/run/containerd/debug.sock"
|
||||
// DefaultFIFODir is the default location used by client-side cio library
|
||||
// to store FIFOs.
|
||||
DefaultFIFODir = "/var/run/containerd/fifo"
|
||||
// DefaultSnapshotter will set the default snapshotter for the platform.
|
||||
// This will be based on the client compilation target, so take that into
|
||||
// account when choosing this value.
|
||||
DefaultSnapshotter = "native"
|
||||
// DefaultStateDir is the default location used by containerd to store
|
||||
// transient data
|
||||
DefaultStateDir = "/var/run/containerd"
|
||||
// DefaultDiffer will set the default differ for the platform.
|
||||
DefaultDiffer = "walking"
|
||||
)
|
@@ -38,9 +38,16 @@ const (
|
||||
DefaultAddress = `\\.\pipe\containerd-containerd`
|
||||
// DefaultDebugAddress is the default winpipe address for pprof data
|
||||
DefaultDebugAddress = `\\.\pipe\containerd-debug`
|
||||
// DefaultDiffer will set the default differ for the platform.
|
||||
// This differ should be compatible with the windows snapshotter.
|
||||
DefaultDiffer = "windows"
|
||||
// DefaultFIFODir is the default location used by client-side cio library
|
||||
// to store FIFOs. Unused on Windows.
|
||||
DefaultFIFODir = ""
|
||||
// DefaultRuntime is the default windows runtime
|
||||
DefaultRuntime = "io.containerd.runhcs.v1"
|
||||
// DefaultSnapshotter will set the default snapshotter for the platform.
|
||||
// This will be based on the client compilation target, so take that into
|
||||
// account when choosing this value.
|
||||
DefaultSnapshotter = "windows"
|
||||
)
|
||||
|
86 src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/internal/lazyregexp/lazyregexp.go generated vendored Normal file
@@ -0,0 +1,86 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Code below was largely copied from golang.org/x/mod@v0.22;
|
||||
// https://github.com/golang/mod/blob/v0.22.0/internal/lazyregexp/lazyre.go
|
||||
// with some additional methods added.
|
||||
|
||||
// Package lazyregexp is a thin wrapper over regexp, allowing the use of global
|
||||
// regexp variables without forcing them to be compiled at init.
|
||||
package lazyregexp
|
||||
|
||||
import (
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be
|
||||
// compiled the first time it is needed.
|
||||
type Regexp struct {
|
||||
str string
|
||||
once sync.Once
|
||||
rx *regexp.Regexp
|
||||
}
|
||||
|
||||
func (re *Regexp) re() *regexp.Regexp {
|
||||
re.once.Do(re.build)
|
||||
return re.rx
|
||||
}
|
||||
|
||||
func (re *Regexp) build() {
|
||||
re.rx = regexp.MustCompile(re.str)
|
||||
re.str = ""
|
||||
}
|
||||
|
||||
func (re *Regexp) FindStringIndex(s string) (loc []int) {
|
||||
return re.re().FindStringIndex(s)
|
||||
}
|
||||
|
||||
func (re *Regexp) FindStringSubmatch(s string) []string {
|
||||
return re.re().FindStringSubmatch(s)
|
||||
}
|
||||
|
||||
func (re *Regexp) MatchString(s string) bool {
|
||||
return re.re().MatchString(s)
|
||||
}
|
||||
|
||||
func (re *Regexp) ReplaceAll(src, repl []byte) []byte {
|
||||
return re.re().ReplaceAll(src, repl)
|
||||
}
|
||||
|
||||
func (re *Regexp) String() string {
|
||||
return re.re().String()
|
||||
}
|
||||
|
||||
var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test")
|
||||
|
||||
// New creates a new lazy regexp, delaying the compiling work until it is first
|
||||
// needed. If the code is being run as part of tests, the regexp compiling will
|
||||
// happen immediately.
|
||||
func New(str string) *Regexp {
|
||||
lr := &Regexp{str: str}
|
||||
if inTest {
|
||||
// In tests, always compile the regexps early.
|
||||
lr.re()
|
||||
}
|
||||
return lr
|
||||
}
|
@@ -220,7 +220,9 @@ func DecompressStream(archive io.Reader) (DecompressReadCloser, error) {
},
}, nil
case Zstd:
zstdReader, err := zstd.NewReader(buf)
zstdReader, err := zstd.NewReader(buf,
zstd.WithDecoderLowmem(false),
)
if err != nil {
return nil, err
}
@@ -1,28 +0,0 @@
|
||||
//go:build gofuzz
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package compression
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
)
|
||||
|
||||
func FuzzDecompressStream(data []byte) int {
|
||||
_, _ = DecompressStream(bytes.NewReader(data))
|
||||
return 1
|
||||
}
|
@@ -1,72 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package deprecation
|
||||
|
||||
type Warning string
|
||||
|
||||
const (
|
||||
// Prefix is a standard prefix for all Warnings, used for filtering plugin Exports
|
||||
Prefix = "io.containerd.deprecation/"
|
||||
// PullSchema1Image is a warning for the use of schema 1 images
|
||||
PullSchema1Image Warning = Prefix + "pull-schema-1-image"
|
||||
// GoPluginLibrary is a warning for the use of dynamic library Go plugins
|
||||
GoPluginLibrary Warning = Prefix + "go-plugin-library"
|
||||
// CRIRegistryMirrors is a warning for the use of the `mirrors` property
|
||||
CRIRegistryMirrors Warning = Prefix + "cri-registry-mirrors"
|
||||
// CRIRegistryAuths is a warning for the use of the `auths` property
|
||||
CRIRegistryAuths Warning = Prefix + "cri-registry-auths"
|
||||
// CRIRegistryConfigs is a warning for the use of the `configs` property
|
||||
CRIRegistryConfigs Warning = Prefix + "cri-registry-configs"
|
||||
// OTLPTracingConfig is a warning for the use of the `otlp` property
|
||||
TracingOTLPConfig Warning = Prefix + "tracing-processor-config"
|
||||
// TracingServiceConfig is a warning for the use of the `tracing` property
|
||||
TracingServiceConfig Warning = Prefix + "tracing-service-config"
|
||||
)
|
||||
|
||||
const (
|
||||
EnvPrefix = "CONTAINERD_ENABLE_DEPRECATED_"
|
||||
EnvPullSchema1Image = EnvPrefix + "PULL_SCHEMA_1_IMAGE"
|
||||
)
|
||||
|
||||
var messages = map[Warning]string{
|
||||
PullSchema1Image: "Schema 1 images are deprecated since containerd v1.7, disabled in containerd v2.0, and will be removed in containerd v2.1. " +
|
||||
`Since containerd v1.7.8, schema 1 images are identified by the "io.containerd.image/converted-docker-schema1" label.`,
|
||||
GoPluginLibrary: "Dynamically-linked Go plugins as containerd runtimes are deprecated since containerd v2.0 and removed in containerd v2.1.",
|
||||
CRIRegistryMirrors: "The `mirrors` property of `[plugins.\"io.containerd.grpc.v1.cri\".registry]` is deprecated since containerd v1.5 and will be removed in containerd v2.1." +
|
||||
"Use `config_path` instead.",
|
||||
CRIRegistryAuths: "The `auths` property of `[plugins.\"io.containerd.grpc.v1.cri\".registry]` is deprecated since containerd v1.3 and will be removed in containerd v2.1." +
|
||||
"Use `ImagePullSecrets` instead.",
|
||||
CRIRegistryConfigs: "The `configs` property of `[plugins.\"io.containerd.grpc.v1.cri\".registry]` is deprecated since containerd v1.5 and will be removed in containerd v2.1." +
|
||||
"Use `config_path` instead.",
|
||||
|
||||
TracingOTLPConfig: "The `otlp` property of `[plugins.\"io.containerd.tracing.processor.v1\".otlp]` is deprecated since containerd v1.6 and will be removed in containerd v2.0." +
|
||||
"Use OTLP environment variables instead: https://opentelemetry.io/docs/specs/otel/protocol/exporter/",
|
||||
TracingServiceConfig: "The `tracing` property of `[plugins.\"io.containerd.internal.v1\".tracing]` is deprecated since containerd v1.6 and will be removed in containerd v2.0." +
|
||||
"Use OTEL environment variables instead: https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/",
|
||||
}
|
||||
|
||||
// Valid checks whether a given Warning is valid
|
||||
func Valid(id Warning) bool {
|
||||
_, ok := messages[id]
|
||||
return ok
|
||||
}
|
||||
|
||||
// Message returns the human-readable message for a given Warning
|
||||
func Message(id Warning) (string, bool) {
|
||||
msg, ok := messages[id]
|
||||
return msg, ok
|
||||
}
|
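As context for the deprecation hunk above: a consumer that receives one of these Warning identifiers can check it and resolve the registered message with Valid and Message. A minimal sketch (the import path is assumed from the containerd v2 layout; it is not shown in this diff):

package main

import (
	"fmt"

	"github.com/containerd/containerd/v2/pkg/deprecation"
)

func main() {
	// A warning identifier, as a daemon might report it to a client.
	w := deprecation.PullSchema1Image

	// Valid reports whether the identifier is known to this build.
	if !deprecation.Valid(w) {
		fmt.Println("unknown deprecation warning:", w)
		return
	}

	// Message resolves the identifier to its registered human-readable text.
	if msg, ok := deprecation.Message(w); ok {
		fmt.Printf("%s: %s\n", w, msg)
	}
}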
@@ -26,8 +26,8 @@ package identifiers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"github.com/containerd/containerd/v2/internal/lazyregexp"
|
||||
"github.com/containerd/errdefs"
|
||||
)
|
||||
|
||||
@@ -39,7 +39,7 @@ const (
|
||||
|
||||
var (
|
||||
// identifierRe defines the pattern for valid identifiers.
|
||||
identifierRe = regexp.MustCompile(reAnchor(alphanum + reGroup(separators+reGroup(alphanum)) + "*"))
|
||||
identifierRe = lazyregexp.New(reAnchor(alphanum + reGroup(separators+reGroup(alphanum)) + "*"))
|
||||
)
|
||||
|
||||
// Validate returns nil if the string s is a valid identifier.
|
||||
|
10 src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/namespaces/ttrpc.go generated vendored
@@ -27,20 +27,12 @@ const (
|
||||
TTRPCHeader = "containerd-namespace-ttrpc"
|
||||
)
|
||||
|
||||
func copyMetadata(src ttrpc.MD) ttrpc.MD {
|
||||
md := ttrpc.MD{}
|
||||
for k, v := range src {
|
||||
md[k] = append(md[k], v...)
|
||||
}
|
||||
return md
|
||||
}
|
||||
|
||||
func withTTRPCNamespaceHeader(ctx context.Context, namespace string) context.Context {
|
||||
md, ok := ttrpc.GetMetadata(ctx)
|
||||
if !ok {
|
||||
md = ttrpc.MD{}
|
||||
} else {
|
||||
md = copyMetadata(md)
|
||||
md = md.Clone()
|
||||
}
|
||||
md.Set(TTRPCHeader, namespace)
|
||||
return ttrpc.WithMetadata(ctx, md)
|
||||
|
@@ -20,9 +20,9 @@ import (
|
||||
"errors"
|
||||
"net/url"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/v2/internal/lazyregexp"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
)
|
||||
|
||||
@@ -80,7 +80,7 @@ type Spec struct {
|
||||
Object string
|
||||
}
|
||||
|
||||
var splitRe = regexp.MustCompile(`[:@]`)
|
||||
var splitRe = lazyregexp.New(`[:@]`)
|
||||
|
||||
// Parse parses the string into a structured ref.
|
||||
func Parse(s string) (Spec, error) {
|
||||
|
@@ -1,76 +0,0 @@
|
||||
//go:build gofuzz
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
_ "crypto/sha256"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/opencontainers/go-digest"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
)
|
||||
|
||||
func FuzzContentStoreWriter(data []byte) int {
|
||||
t := &testing.T{}
|
||||
ctx := context.Background()
|
||||
ctx, _, cs, cleanup := contentStoreEnv(t)
|
||||
defer cleanup()
|
||||
|
||||
cw, err := cs.Writer(ctx, content.WithRef("myref"))
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
if err := cw.Close(); err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
// reopen, so we can test things
|
||||
cw, err = cs.Writer(ctx, content.WithRef("myref"))
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
|
||||
err = checkCopyFuzz(int64(len(data)), cw, bufio.NewReader(io.NopCloser(bytes.NewReader(data))))
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
expected := digest.FromBytes(data)
|
||||
|
||||
if err = cw.Commit(ctx, int64(len(data)), expected); err != nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
func checkCopyFuzz(size int64, dst io.Writer, src io.Reader) error {
|
||||
nn, err := io.Copy(dst, src)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if nn != size {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
@@ -1,38 +0,0 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package local
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
)
|
||||
|
||||
func contentStoreEnv(t testing.TB) (context.Context, string, content.Store, func()) {
|
||||
tmpdir := t.TempDir()
|
||||
|
||||
cs, err := NewStore(tmpdir)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
return ctx, tmpdir, cs, func() {
|
||||
cancel()
|
||||
}
|
||||
}
|
3 src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/version/version.go generated vendored
@@ -19,11 +19,12 @@ package version
|
||||
import "runtime"
|
||||
|
||||
var (
|
||||
Name = "containerd"
|
||||
// Package is filled at linking time
|
||||
Package = "github.com/containerd/containerd/v2"
|
||||
|
||||
// Version holds the complete version number. Filled in at linking time.
|
||||
Version = "2.0.2+unknown"
|
||||
Version = "2.1.3+unknown"
|
||||
|
||||
// Revision is filled with the VCS (e.g. git) revision being used to build
|
||||
// the program at linking time.
|
||||
|
96 src/cmd/linuxkit/vendor/github.com/containerd/errdefs/pkg/errhttp/http.go generated vendored Normal file
@@ -0,0 +1,96 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package errhttp provides utility functions for translating errors to
|
||||
// and from a HTTP context.
|
||||
//
|
||||
// The functions ToHTTP and ToNative can be used to map server-side and
|
||||
// client-side errors to the correct types.
|
||||
package errhttp
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/containerd/errdefs/pkg/internal/cause"
|
||||
)
|
||||
|
||||
// ToHTTP returns the best status code for the given error
|
||||
func ToHTTP(err error) int {
|
||||
switch {
|
||||
case errdefs.IsNotFound(err):
|
||||
return http.StatusNotFound
|
||||
case errdefs.IsInvalidArgument(err):
|
||||
return http.StatusBadRequest
|
||||
case errdefs.IsConflict(err):
|
||||
return http.StatusConflict
|
||||
case errdefs.IsNotModified(err):
|
||||
return http.StatusNotModified
|
||||
case errdefs.IsFailedPrecondition(err):
|
||||
return http.StatusPreconditionFailed
|
||||
case errdefs.IsUnauthorized(err):
|
||||
return http.StatusUnauthorized
|
||||
case errdefs.IsPermissionDenied(err):
|
||||
return http.StatusForbidden
|
||||
case errdefs.IsResourceExhausted(err):
|
||||
return http.StatusTooManyRequests
|
||||
case errdefs.IsInternal(err):
|
||||
return http.StatusInternalServerError
|
||||
case errdefs.IsNotImplemented(err):
|
||||
return http.StatusNotImplemented
|
||||
case errdefs.IsUnavailable(err):
|
||||
return http.StatusServiceUnavailable
|
||||
case errdefs.IsUnknown(err):
|
||||
var unexpected cause.ErrUnexpectedStatus
|
||||
if errors.As(err, &unexpected) && unexpected.Status >= 200 && unexpected.Status < 600 {
|
||||
return unexpected.Status
|
||||
}
|
||||
return http.StatusInternalServerError
|
||||
default:
|
||||
return http.StatusInternalServerError
|
||||
}
|
||||
}
|
||||
|
||||
// ToNative returns the error best matching the HTTP status code
|
||||
func ToNative(statusCode int) error {
|
||||
switch statusCode {
|
||||
case http.StatusNotFound:
|
||||
return errdefs.ErrNotFound
|
||||
case http.StatusBadRequest:
|
||||
return errdefs.ErrInvalidArgument
|
||||
case http.StatusConflict:
|
||||
return errdefs.ErrConflict
|
||||
case http.StatusPreconditionFailed:
|
||||
return errdefs.ErrFailedPrecondition
|
||||
case http.StatusUnauthorized:
|
||||
return errdefs.ErrUnauthenticated
|
||||
case http.StatusForbidden:
|
||||
return errdefs.ErrPermissionDenied
|
||||
case http.StatusNotModified:
|
||||
return errdefs.ErrNotModified
|
||||
case http.StatusTooManyRequests:
|
||||
return errdefs.ErrResourceExhausted
|
||||
case http.StatusInternalServerError:
|
||||
return errdefs.ErrInternal
|
||||
case http.StatusNotImplemented:
|
||||
return errdefs.ErrNotImplemented
|
||||
case http.StatusServiceUnavailable:
|
||||
return errdefs.ErrUnavailable
|
||||
default:
|
||||
return cause.ErrUnexpectedStatus{Status: statusCode}
|
||||
}
|
||||
}
|
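The new errhttp package shown above maps errdefs sentinel errors to HTTP status codes and back. A small sketch of the round trip, using the vendored import paths from the header above (the wrapped error is illustrative):

package main

import (
	"fmt"

	"github.com/containerd/errdefs"
	"github.com/containerd/errdefs/pkg/errhttp"
)

func main() {
	// Server side: map a typed (possibly wrapped) error to a status code.
	err := fmt.Errorf("image %q: %w", "example:latest", errdefs.ErrNotFound)
	code := errhttp.ToHTTP(err)
	fmt.Println(code) // 404

	// Client side: map the status code back to a sentinel error.
	nativeErr := errhttp.ToNative(code)
	fmt.Println(errdefs.IsNotFound(nativeErr)) // true
}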
31 src/cmd/linuxkit/vendor/github.com/docker/cli/AUTHORS generated vendored
@@ -48,6 +48,7 @@ Alfred Landrum <alfred.landrum@docker.com>
|
||||
Ali Rostami <rostami.ali@gmail.com>
|
||||
Alicia Lauerman <alicia@eta.im>
|
||||
Allen Sun <allensun.shl@alibaba-inc.com>
|
||||
Allie Sadler <allie.sadler@docker.com>
|
||||
Alvin Deng <alvin.q.deng@utexas.edu>
|
||||
Amen Belayneh <amenbelayneh@gmail.com>
|
||||
Amey Shrivastava <72866602+AmeyShrivastava@users.noreply.github.com>
|
||||
@@ -81,6 +82,7 @@ Antonis Kalipetis <akalipetis@gmail.com>
|
||||
Anusha Ragunathan <anusha.ragunathan@docker.com>
|
||||
Ao Li <la9249@163.com>
|
||||
Arash Deshmeh <adeshmeh@ca.ibm.com>
|
||||
Archimedes Trajano <developer@trajano.net>
|
||||
Arko Dasgupta <arko@tetrate.io>
|
||||
Arnaud Porterie <icecrime@gmail.com>
|
||||
Arnaud Rebillout <elboulangero@gmail.com>
|
||||
@@ -88,6 +90,7 @@ Arthur Peka <arthur.peka@outlook.com>
|
||||
Ashly Mathew <ashly.mathew@sap.com>
|
||||
Ashwini Oruganti <ashwini.oruganti@gmail.com>
|
||||
Aslam Ahemad <aslamahemad@gmail.com>
|
||||
Austin Vazquez <austin.vazquez.dev@gmail.com>
|
||||
Azat Khuyiyakhmetov <shadow_uz@mail.ru>
|
||||
Bardia Keyoumarsi <bkeyouma@ucsc.edu>
|
||||
Barnaby Gray <barnaby@pickle.me.uk>
|
||||
@@ -132,6 +135,7 @@ Cao Weiwei <cao.weiwei30@zte.com.cn>
|
||||
Carlo Mion <mion00@gmail.com>
|
||||
Carlos Alexandro Becker <caarlos0@gmail.com>
|
||||
Carlos de Paula <me@carlosedp.com>
|
||||
Carston Schilds <Carston.Schilds@visier.com>
|
||||
Casey Korver <casey@korver.dev>
|
||||
Ce Gao <ce.gao@outlook.com>
|
||||
Cedric Davies <cedricda@microsoft.com>
|
||||
@@ -189,6 +193,7 @@ Daisuke Ito <itodaisuke00@gmail.com>
|
||||
dalanlan <dalanlan925@gmail.com>
|
||||
Damien Nadé <github@livna.org>
|
||||
Dan Cotora <dan@bluevision.ro>
|
||||
Dan Wallis <dan@wallis.nz>
|
||||
Danial Gharib <danial.mail.gh@gmail.com>
|
||||
Daniel Artine <daniel.artine@ufrj.br>
|
||||
Daniel Cassidy <mail@danielcassidy.me.uk>
|
||||
@@ -237,6 +242,7 @@ Deshi Xiao <dxiao@redhat.com>
|
||||
Dharmit Shah <shahdharmit@gmail.com>
|
||||
Dhawal Yogesh Bhanushali <dbhanushali@vmware.com>
|
||||
Dieter Reuter <dieter.reuter@me.com>
|
||||
Dilep Dev <34891655+DilepDev@users.noreply.github.com>
|
||||
Dima Stopel <dima@twistlock.com>
|
||||
Dimitry Andric <d.andric@activevideo.com>
|
||||
Ding Fei <dingfei@stars.org.cn>
|
||||
@@ -308,6 +314,8 @@ George MacRorie <gmacr31@gmail.com>
|
||||
George Margaritis <gmargaritis@protonmail.com>
|
||||
George Xie <georgexsh@gmail.com>
|
||||
Gianluca Borello <g.borello@gmail.com>
|
||||
Giau. Tran Minh <hello@giautm.dev>
|
||||
Giedrius Jonikas <giedriusj1@gmail.com>
|
||||
Gildas Cuisinier <gildas.cuisinier@gcuisinier.net>
|
||||
Gio d'Amelio <giodamelio@gmail.com>
|
||||
Gleb Stsenov <gleb.stsenov@gmail.com>
|
||||
@@ -344,6 +352,7 @@ Hugo Gabriel Eyherabide <hugogabriel.eyherabide@gmail.com>
|
||||
huqun <huqun@zju.edu.cn>
|
||||
Huu Nguyen <huu@prismskylabs.com>
|
||||
Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
|
||||
Iain MacDonald <IJMacD@gmail.com>
|
||||
Iain Samuel McLean Elder <iain@isme.es>
|
||||
Ian Campbell <ian.campbell@docker.com>
|
||||
Ian Philpot <ian.philpot@microsoft.com>
|
||||
@@ -393,6 +402,7 @@ Jesse Adametz <jesseadametz@gmail.com>
|
||||
Jessica Frazelle <jess@oxide.computer>
|
||||
Jezeniel Zapanta <jpzapanta22@gmail.com>
|
||||
Jian Zhang <zhangjian.fnst@cn.fujitsu.com>
|
||||
Jianyong Wu <wujianyong@hygon.cn>
|
||||
Jie Luo <luo612@zju.edu.cn>
|
||||
Jilles Oldenbeuving <ojilles@gmail.com>
|
||||
Jim Chen <njucjc@gmail.com>
|
||||
@@ -446,6 +456,7 @@ Julian <gitea+julian@ic.thejulian.uk>
|
||||
Julien Barbier <write0@gmail.com>
|
||||
Julien Kassar <github@kassisol.com>
|
||||
Julien Maitrehenry <julien.maitrehenry@me.com>
|
||||
Julio Cesar Garcia <juliogarciamelgarejo@gmail.com>
|
||||
Justas Brazauskas <brazauskasjustas@gmail.com>
|
||||
Justin Chadwell <me@jedevc.com>
|
||||
Justin Cormack <justin.cormack@docker.com>
|
||||
@@ -490,19 +501,22 @@ Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
|
||||
Kyle Mitofsky <Kylemit@gmail.com>
|
||||
Lachlan Cooper <lachlancooper@gmail.com>
|
||||
Lai Jiangshan <jiangshanlai@gmail.com>
|
||||
Lajos Papp <lajos.papp@sequenceiq.com>
|
||||
Lars Kellogg-Stedman <lars@redhat.com>
|
||||
Laura Brehm <laurabrehm@hey.com>
|
||||
Laura Frank <ljfrank@gmail.com>
|
||||
Laurent Erignoux <lerignoux@gmail.com>
|
||||
Laurent Goderre <laurent.goderre@docker.com>
|
||||
Lee Gaines <eightlimbed@gmail.com>
|
||||
Lei Jitang <leijitang@huawei.com>
|
||||
Lennie <github@consolejunkie.net>
|
||||
lentil32 <lentil32@icloud.com>
|
||||
Leo Gallucci <elgalu3@gmail.com>
|
||||
Leonid Skorospelov <leosko94@gmail.com>
|
||||
Lewis Daly <lewisdaly@me.com>
|
||||
Li Fu Bang <lifubang@acmcoder.com>
|
||||
Li Yi <denverdino@gmail.com>
|
||||
Li Yi <weiyuan.yl@alibaba-inc.com>
|
||||
Li Zeghong <zeghong@hotmail.com>
|
||||
Liang-Chi Hsieh <viirya@gmail.com>
|
||||
Lihua Tang <lhtang@alauda.io>
|
||||
Lily Guo <lily.guo@docker.com>
|
||||
@@ -515,6 +529,7 @@ lixiaobing10051267 <li.xiaobing1@zte.com.cn>
|
||||
Lloyd Dewolf <foolswisdom@gmail.com>
|
||||
Lorenzo Fontana <lo@linux.com>
|
||||
Louis Opter <kalessin@kalessin.fr>
|
||||
Lovekesh Kumar <lovekesh.kumar@rtcamp.com>
|
||||
Luca Favatella <luca.favatella@erlang-solutions.com>
|
||||
Luca Marturana <lucamarturana@gmail.com>
|
||||
Lucas Chan <lucas-github@lucaschan.com>
|
||||
@@ -559,6 +574,7 @@ Matt Robenolt <matt@ydekproductions.com>
|
||||
Matteo Orefice <matteo.orefice@bites4bits.software>
|
||||
Matthew Heon <mheon@redhat.com>
|
||||
Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
|
||||
Matthieu MOREL <matthieu.morel35@gmail.com>
|
||||
Mauro Porras P <mauroporrasp@gmail.com>
|
||||
Max Shytikov <mshytikov@gmail.com>
|
||||
Max-Julian Pogner <max-julian@pogner.at>
|
||||
@@ -566,6 +582,7 @@ Maxime Petazzoni <max@signalfuse.com>
|
||||
Maximillian Fan Xavier <maximillianfx@gmail.com>
|
||||
Mei ChunTao <mei.chuntao@zte.com.cn>
|
||||
Melroy van den Berg <melroy@melroy.org>
|
||||
Mert Şişmanoğlu <mert190737fb@gmail.com>
|
||||
Metal <2466052+tedhexaflow@users.noreply.github.com>
|
||||
Micah Zoltu <micah@newrelic.com>
|
||||
Michael A. Smith <michael@smith-li.com>
|
||||
@@ -598,7 +615,9 @@ Mindaugas Rukas <momomg@gmail.com>
|
||||
Miroslav Gula <miroslav.gula@naytrolabs.com>
|
||||
Misty Stanley-Jones <misty@docker.com>
|
||||
Mohammad Banikazemi <mb@us.ibm.com>
|
||||
Mohammad Hossein <mhm98035@gmail.com>
|
||||
Mohammed Aaqib Ansari <maaquib@gmail.com>
|
||||
Mohammed Aminu Futa <mohammedfuta2000@gmail.com>
|
||||
Mohini Anne Dsouza <mohini3917@gmail.com>
|
||||
Moorthy RS <rsmoorthy@gmail.com>
|
||||
Morgan Bauer <mbauer@us.ibm.com>
|
||||
@@ -633,9 +652,11 @@ Nicolas De Loof <nicolas.deloof@gmail.com>
|
||||
Nikhil Chawla <chawlanikhil24@gmail.com>
|
||||
Nikolas Garofil <nikolas.garofil@uantwerpen.be>
|
||||
Nikolay Milovanov <nmil@itransformers.net>
|
||||
NinaLua <iturf@sina.cn>
|
||||
Nir Soffer <nsoffer@redhat.com>
|
||||
Nishant Totla <nishanttotla@gmail.com>
|
||||
NIWA Hideyuki <niwa.niwa@nifty.ne.jp>
|
||||
Noah Silas <noah@hustle.com>
|
||||
Noah Treuhaft <noah.treuhaft@docker.com>
|
||||
O.S. Tezer <ostezer@gmail.com>
|
||||
Oded Arbel <oded@geek.co.il>
|
||||
@@ -653,10 +674,12 @@ Patrick Böänziger <patrick.baenziger@bsi-software.com>
|
||||
Patrick Daigle <114765035+pdaig@users.noreply.github.com>
|
||||
Patrick Hemmer <patrick.hemmer@gmail.com>
|
||||
Patrick Lang <plang@microsoft.com>
|
||||
Patrick St. laurent <patrick@saint-laurent.us>
|
||||
Paul <paul9869@gmail.com>
|
||||
Paul Kehrer <paul.l.kehrer@gmail.com>
|
||||
Paul Lietar <paul@lietar.net>
|
||||
Paul Mulders <justinkb@gmail.com>
|
||||
Paul Rogalski <mail@paul-rogalski.de>
|
||||
Paul Seyfert <pseyfert.mathphys@gmail.com>
|
||||
Paul Weaver <pauweave@cisco.com>
|
||||
Pavel Pospisil <pospispa@gmail.com>
|
||||
@@ -678,7 +701,6 @@ Philip Alexander Etling <paetling@gmail.com>
|
||||
Philipp Gillé <philipp.gille@gmail.com>
|
||||
Philipp Schmied <pschmied@schutzwerk.com>
|
||||
Phong Tran <tran.pho@northeastern.edu>
|
||||
pidster <pid@pidster.com>
|
||||
Pieter E Smit <diepes@github.com>
|
||||
pixelistik <pixelistik@users.noreply.github.com>
|
||||
Pratik Karki <prertik@outlook.com>
|
||||
@@ -738,6 +760,7 @@ Samuel Cochran <sj26@sj26.com>
|
||||
Samuel Karp <skarp@amazon.com>
|
||||
Sandro Jäckel <sandro.jaeckel@gmail.com>
|
||||
Santhosh Manohar <santhosh@docker.com>
|
||||
Sarah Sanders <sarah.sanders@docker.com>
|
||||
Sargun Dhillon <sargun@netflix.com>
|
||||
Saswat Bhattacharya <sas.saswat@gmail.com>
|
||||
Saurabh Kumar <saurabhkumar0184@gmail.com>
|
||||
@@ -770,6 +793,7 @@ Spencer Brown <spencer@spencerbrown.org>
|
||||
Spring Lee <xi.shuai@outlook.com>
|
||||
squeegels <lmscrewy@gmail.com>
|
||||
Srini Brahmaroutu <srbrahma@us.ibm.com>
|
||||
Stavros Panakakis <stavrospanakakis@gmail.com>
|
||||
Stefan S. <tronicum@user.github.com>
|
||||
Stefan Scherer <stefan.scherer@docker.com>
|
||||
Stefan Weil <sw@weilnetz.de>
|
||||
@@ -780,6 +804,7 @@ Steve Durrheimer <s.durrheimer@gmail.com>
|
||||
Steve Richards <steve.richards@docker.com>
|
||||
Steven Burgess <steven.a.burgess@hotmail.com>
|
||||
Stoica-Marcu Floris-Andrei <floris.sm@gmail.com>
|
||||
Stuart Williams <pid@pidster.com>
|
||||
Subhajit Ghosh <isubuz.g@gmail.com>
|
||||
Sun Jianbo <wonderflow.sun@gmail.com>
|
||||
Sune Keller <absukl@almbrand.dk>
|
||||
@@ -867,6 +892,7 @@ Wang Yumu <37442693@qq.com>
|
||||
Wataru Ishida <ishida.wataru@lab.ntt.co.jp>
|
||||
Wayne Song <wsong@docker.com>
|
||||
Wen Cheng Ma <wenchma@cn.ibm.com>
|
||||
Wenlong Zhang <zhangwenlong@loongson.cn>
|
||||
Wenzhi Liang <wenzhi.liang@gmail.com>
|
||||
Wes Morgan <cap10morgan@gmail.com>
|
||||
Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
|
||||
@@ -908,3 +934,4 @@ Zhuo Zhi <h.dwwwwww@gmail.com>
|
||||
Átila Camurça Alves <camurca.home@gmail.com>
|
||||
Александр Менщиков <__Singleton__@hackerdom.ru>
|
||||
徐俊杰 <paco.xu@daocloud.io>
|
||||
林博仁 Buo-ren Lin <Buo.Ren.Lin@gmail.com>
|
||||
|
7 src/cmd/linuxkit/vendor/github.com/docker/cli/cli/config/config.go generated vendored
@@ -58,7 +58,7 @@ func resetConfigDir() {
|
||||
// getHomeDir is a copy of [pkg/homedir.Get] to prevent adding docker/docker
|
||||
// as dependency for consumers that only need to read the config-file.
|
||||
//
|
||||
// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v26.1.4+incompatible/pkg/homedir#Get
|
||||
// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v28.0.3+incompatible/pkg/homedir#Get
|
||||
func getHomeDir() string {
|
||||
home, _ := os.UserHomeDir()
|
||||
if home == "" && runtime.GOOS != "windows" {
|
||||
@@ -69,6 +69,11 @@ func getHomeDir() string {
|
||||
return home
|
||||
}
|
||||
|
||||
// Provider defines an interface for providing the CLI config.
|
||||
type Provider interface {
|
||||
ConfigFile() *configfile.ConfigFile
|
||||
}
|
||||
|
||||
// Dir returns the directory the configuration file is stored in
|
||||
func Dir() string {
|
||||
initConfigDir.Do(func() {
|
||||
|
17 src/cmd/linuxkit/vendor/github.com/docker/cli/cli/config/configfile/file.go generated vendored
@@ -36,12 +36,14 @@ type ConfigFile struct {
|
||||
NodesFormat string `json:"nodesFormat,omitempty"`
|
||||
PruneFilters []string `json:"pruneFilters,omitempty"`
|
||||
Proxies map[string]ProxyConfig `json:"proxies,omitempty"`
|
||||
Experimental string `json:"experimental,omitempty"`
|
||||
CurrentContext string `json:"currentContext,omitempty"`
|
||||
CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"`
|
||||
Plugins map[string]map[string]string `json:"plugins,omitempty"`
|
||||
Aliases map[string]string `json:"aliases,omitempty"`
|
||||
Features map[string]string `json:"features,omitempty"`
|
||||
|
||||
// Deprecated: experimental CLI features are always enabled and this field is no longer used. Use [Features] instead for optional features. This field will be removed in a future release.
|
||||
Experimental string `json:"experimental,omitempty"`
|
||||
}
|
||||
|
||||
// ProxyConfig contains proxy configuration settings
|
||||
@@ -150,7 +152,8 @@ func (configFile *ConfigFile) Save() (retErr error) {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
temp.Close()
|
||||
// ignore error as the file may already be closed when we reach this.
|
||||
_ = temp.Close()
|
||||
if retErr != nil {
|
||||
if err := os.Remove(temp.Name()); err != nil {
|
||||
logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file")
|
||||
@@ -167,10 +170,16 @@ func (configFile *ConfigFile) Save() (retErr error) {
|
||||
return errors.Wrap(err, "error closing temp file")
|
||||
}
|
||||
|
||||
// Handle situation where the configfile is a symlink
|
||||
// Handle situation where the configfile is a symlink, and allow for dangling symlinks
|
||||
cfgFile := configFile.Filename
|
||||
if f, err := os.Readlink(cfgFile); err == nil {
|
||||
if f, err := filepath.EvalSymlinks(cfgFile); err == nil {
|
||||
cfgFile = f
|
||||
} else if os.IsNotExist(err) {
|
||||
// extract the path from the error if the configfile does not exist or is a dangling symlink
|
||||
var pathError *os.PathError
|
||||
if errors.As(err, &pathError) {
|
||||
cfgFile = pathError.Path
|
||||
}
|
||||
}
|
||||
|
||||
// Try copying the current config file (if any) ownership and permissions
|
||||
|
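The Save change above swaps os.Readlink for filepath.EvalSymlinks so that writing the config follows a chain of symlinks and still recovers the target path of a dangling link. A standalone sketch of that resolution logic (the helper name is illustrative, not part of the vendored code):

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// resolveConfigPath mirrors the symlink handling in ConfigFile.Save: follow
// symlinks when the target exists, and fall back to the path reported in the
// *os.PathError when the link is dangling.
func resolveConfigPath(cfgFile string) string {
	if f, err := filepath.EvalSymlinks(cfgFile); err == nil {
		return f
	} else if os.IsNotExist(err) {
		var pathError *os.PathError
		if errors.As(err, &pathError) {
			return pathError.Path
		}
	}
	return cfgFile
}

func main() {
	fmt.Println(resolveConfigPath("/tmp/does-not-exist/config.json"))
}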
11 src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go generated vendored
@@ -28,7 +28,6 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -149,7 +148,7 @@ func (c *commandConn) handleEOF(err error) error {
|
||||
c.stderrMu.Lock()
|
||||
stderr := c.stderr.String()
|
||||
c.stderrMu.Unlock()
|
||||
return errors.Errorf("command %v did not exit after %v: stderr=%q", c.cmd.Args, err, stderr)
|
||||
return fmt.Errorf("command %v did not exit after %v: stderr=%q", c.cmd.Args, err, stderr)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -159,7 +158,7 @@ func (c *commandConn) handleEOF(err error) error {
|
||||
c.stderrMu.Lock()
|
||||
stderr := c.stderr.String()
|
||||
c.stderrMu.Unlock()
|
||||
return errors.Errorf("command %v has exited with %v, make sure the URL is valid, and Docker 18.09 or later is installed on the remote host: stderr=%s", c.cmd.Args, werr, stderr)
|
||||
return fmt.Errorf("command %v has exited with %v, make sure the URL is valid, and Docker 18.09 or later is installed on the remote host: stderr=%s", c.cmd.Args, werr, stderr)
|
||||
}
|
||||
|
||||
func ignorableCloseError(err error) bool {
|
||||
@@ -253,17 +252,17 @@ func (c *commandConn) RemoteAddr() net.Addr {
|
||||
return c.remoteAddr
|
||||
}
|
||||
|
||||
func (c *commandConn) SetDeadline(t time.Time) error {
|
||||
func (*commandConn) SetDeadline(t time.Time) error {
|
||||
logrus.Debugf("unimplemented call: SetDeadline(%v)", t)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *commandConn) SetReadDeadline(t time.Time) error {
|
||||
func (*commandConn) SetReadDeadline(t time.Time) error {
|
||||
logrus.Debugf("unimplemented call: SetReadDeadline(%v)", t)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *commandConn) SetWriteDeadline(t time.Time) error {
|
||||
func (*commandConn) SetWriteDeadline(t time.Time) error {
|
||||
logrus.Debugf("unimplemented call: SetWriteDeadline(%v)", t)
|
||||
return nil
|
||||
}
|
||||
|
6 src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/connhelper.go generated vendored
@@ -3,13 +3,13 @@ package connhelper
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/cli/cli/connhelper/commandconn"
|
||||
"github.com/docker/cli/cli/connhelper/ssh"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ConnectionHelper allows to connect to a remote host with custom stream provider binary.
|
||||
@@ -41,9 +41,9 @@ func getConnectionHelper(daemonURL string, sshFlags []string) (*ConnectionHelper
|
||||
return nil, err
|
||||
}
|
||||
if u.Scheme == "ssh" {
|
||||
sp, err := ssh.ParseURL(daemonURL)
|
||||
sp, err := ssh.NewSpec(u)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "ssh host connection is not valid")
|
||||
return nil, fmt.Errorf("ssh host connection is not valid: %w", err)
|
||||
}
|
||||
sshFlags = addSSHTimeout(sshFlags)
|
||||
sshFlags = disablePseudoTerminalAllocation(sshFlags)
|
||||
|
46 src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go generated vendored
@@ -2,19 +2,46 @@
|
||||
package ssh
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// ParseURL parses URL
|
||||
// ParseURL creates a [Spec] from the given ssh URL. It returns an error if
|
||||
// the URL is using the wrong scheme, contains fragments, query-parameters,
|
||||
// or contains a password.
|
||||
func ParseURL(daemonURL string) (*Spec, error) {
|
||||
u, err := url.Parse(daemonURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
var urlErr *url.Error
|
||||
if errors.As(err, &urlErr) {
|
||||
err = urlErr.Unwrap()
|
||||
}
|
||||
return nil, fmt.Errorf("invalid SSH URL: %w", err)
|
||||
}
|
||||
return NewSpec(u)
|
||||
}
|
||||
|
||||
// NewSpec creates a [Spec] from the given ssh URL's properties. It returns
|
||||
// an error if the URL is using the wrong scheme, contains fragments,
|
||||
// query-parameters, or contains a password.
|
||||
func NewSpec(sshURL *url.URL) (*Spec, error) {
|
||||
s, err := newSpec(sshURL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid SSH URL: %w", err)
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func newSpec(u *url.URL) (*Spec, error) {
|
||||
if u == nil {
|
||||
return nil, errors.New("URL is nil")
|
||||
}
|
||||
if u.Scheme == "" {
|
||||
return nil, errors.New("no scheme provided")
|
||||
}
|
||||
if u.Scheme != "ssh" {
|
||||
return nil, errors.Errorf("expected scheme ssh, got %q", u.Scheme)
|
||||
return nil, errors.New("incorrect scheme: " + u.Scheme)
|
||||
}
|
||||
|
||||
var sp Spec
|
||||
@@ -27,17 +54,18 @@ func ParseURL(daemonURL string) (*Spec, error) {
|
||||
}
|
||||
sp.Host = u.Hostname()
|
||||
if sp.Host == "" {
|
||||
return nil, errors.Errorf("no host specified")
|
||||
return nil, errors.New("hostname is empty")
|
||||
}
|
||||
sp.Port = u.Port()
|
||||
sp.Path = u.Path
|
||||
if u.RawQuery != "" {
|
||||
return nil, errors.Errorf("extra query after the host: %q", u.RawQuery)
|
||||
return nil, fmt.Errorf("query parameters are not allowed: %q", u.RawQuery)
|
||||
}
|
||||
if u.Fragment != "" {
|
||||
return nil, errors.Errorf("extra fragment after the host: %q", u.Fragment)
|
||||
return nil, fmt.Errorf("fragments are not allowed: %q", u.Fragment)
|
||||
}
|
||||
return &sp, err
|
||||
|
||||
return &sp, nil
|
||||
}
|
||||
|
||||
// Spec of SSH URL
|
||||
|
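The reworked ssh package above splits URL parsing (ParseURL) from validation of an already-parsed URL (NewSpec). A quick sketch of accepted and rejected forms, based on the checks shown (the URLs are examples only):

package main

import (
	"fmt"

	"github.com/docker/cli/cli/connhelper/ssh"
)

func main() {
	// Valid: ssh scheme, host, optional user, port and path.
	if sp, err := ssh.ParseURL("ssh://me@example.com:2222/var/run/docker.sock"); err == nil {
		fmt.Println(sp.Host, sp.Port, sp.Path)
	}

	// Rejected: query parameters are not allowed after the host.
	if _, err := ssh.ParseURL("ssh://example.com?socket=/var/run/docker.sock"); err != nil {
		fmt.Println("invalid:", err)
	}

	// Rejected: wrong scheme.
	if _, err := ssh.ParseURL("tcp://example.com:2375"); err != nil {
		fmt.Println("invalid:", err)
	}
}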
29 src/cmd/linuxkit/vendor/github.com/docker/docker-credential-helpers/client/command.go generated vendored
@@ -15,27 +15,30 @@ type Program interface {
|
||||
// ProgramFunc is a type of function that initializes programs based on arguments.
|
||||
type ProgramFunc func(args ...string) Program
|
||||
|
||||
// NewShellProgramFunc creates programs that are executed in a Shell.
|
||||
func NewShellProgramFunc(name string) ProgramFunc {
|
||||
return NewShellProgramFuncWithEnv(name, nil)
|
||||
}
|
||||
|
||||
// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables
|
||||
func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc {
|
||||
// NewShellProgramFunc creates a [ProgramFunc] to run command in a [Shell].
|
||||
func NewShellProgramFunc(command string) ProgramFunc {
|
||||
return func(args ...string) Program {
|
||||
return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)}
|
||||
return createProgramCmdRedirectErr(command, args, nil)
|
||||
}
|
||||
}
|
||||
|
||||
func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd {
|
||||
programCmd := exec.Command(commandName, args...)
|
||||
// NewShellProgramFuncWithEnv creates a [ProgramFunc] to run command
|
||||
// in a [Shell] with the given environment variables.
|
||||
func NewShellProgramFuncWithEnv(command string, env *map[string]string) ProgramFunc {
|
||||
return func(args ...string) Program {
|
||||
return createProgramCmdRedirectErr(command, args, env)
|
||||
}
|
||||
}
|
||||
|
||||
func createProgramCmdRedirectErr(command string, args []string, env *map[string]string) *Shell {
|
||||
ec := exec.Command(command, args...)
|
||||
if env != nil {
|
||||
for k, v := range *env {
|
||||
programCmd.Env = append(programCmd.Environ(), k+"="+v)
|
||||
ec.Env = append(ec.Environ(), k+"="+v)
|
||||
}
|
||||
}
|
||||
programCmd.Stderr = os.Stderr
|
||||
return programCmd
|
||||
ec.Stderr = os.Stderr
|
||||
return &Shell{cmd: ec}
|
||||
}
|
||||
|
||||
// Shell invokes shell commands to talk with a remote credentials-helper.
|
||||
|
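The constructors above now return the *Shell built by createProgramCmdRedirectErr directly. A brief sketch of wiring them up; the helper binary name is hypothetical, and driving the resulting Program (stdin/stdout handling) is done by other helpers in the same package that are outside this diff:

package main

import (
	"github.com/docker/docker-credential-helpers/client"
)

func main() {
	// Plain constructor: the helper binary runs with the current environment.
	// "docker-credential-example" is a hypothetical helper name.
	program := client.NewShellProgramFunc("docker-credential-example")

	// Constructor with environment overrides, appended to the command's
	// environment exactly as createProgramCmdRedirectErr does above.
	env := map[string]string{"EXAMPLE_STORE_DIR": "/tmp/example-store"}
	withEnv := client.NewShellProgramFuncWithEnv("docker-credential-example", &env)

	// Each invocation builds a Shell around `docker-credential-example <args...>`
	// with stderr forwarded to the caller's stderr.
	_ = program("list")
	_ = withEnv("get")
}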
12 src/cmd/linuxkit/vendor/github.com/docker/docker/AUTHORS generated vendored
@@ -2,6 +2,7 @@
|
||||
# This file lists all contributors to the repository.
|
||||
# See hack/generate-authors.sh to make modifications.
|
||||
|
||||
17neverends <ionianrise@gmail.com>
|
||||
7sunarni <710720732@qq.com>
|
||||
Aanand Prasad <aanand.prasad@gmail.com>
|
||||
Aarni Koskela <akx@iki.fi>
|
||||
@@ -189,6 +190,7 @@ Anes Hasicic <anes.hasicic@gmail.com>
|
||||
Angel Velazquez <angelcar@amazon.com>
|
||||
Anil Belur <askb23@gmail.com>
|
||||
Anil Madhavapeddy <anil@recoil.org>
|
||||
Anirudh Aithal <aithal@amazon.com>
|
||||
Ankit Jain <ajatkj@yahoo.co.in>
|
||||
Ankush Agarwal <ankushagarwal11@gmail.com>
|
||||
Anonmily <michelle@michelleliu.io>
|
||||
@@ -227,7 +229,7 @@ Arun Gupta <arun.gupta@gmail.com>
|
||||
Asad Saeeduddin <masaeedu@gmail.com>
|
||||
Asbjørn Enge <asbjorn@hanafjedle.net>
|
||||
Ashly Mathew <ashly.mathew@sap.com>
|
||||
Austin Vazquez <macedonv@amazon.com>
|
||||
Austin Vazquez <austin.vazquez.dev@gmail.com>
|
||||
averagehuman <averagehuman@users.noreply.github.com>
|
||||
Avi Das <andas222@gmail.com>
|
||||
Avi Kivity <avi@scylladb.com>
|
||||
@@ -293,6 +295,7 @@ Brandon Liu <bdon@bdon.org>
|
||||
Brandon Philips <brandon.philips@coreos.com>
|
||||
Brandon Rhodes <brandon@rhodesmill.org>
|
||||
Brendan Dixon <brendand@microsoft.com>
|
||||
Brendon Smith <bws@bws.bio>
|
||||
Brennan Kinney <5098581+polarathene@users.noreply.github.com>
|
||||
Brent Salisbury <brent.salisbury@docker.com>
|
||||
Brett Higgins <brhiggins@arbor.net>
|
||||
@@ -347,6 +350,7 @@ Casey Bisson <casey.bisson@joyent.com>
|
||||
Catalin Pirvu <pirvu.catalin94@gmail.com>
|
||||
Ce Gao <ce.gao@outlook.com>
|
||||
Cedric Davies <cedricda@microsoft.com>
|
||||
Cesar Talledo <cesar.talledo@docker.com>
|
||||
Cezar Sa Espinola <cezarsa@gmail.com>
|
||||
Chad Swenson <chadswen@gmail.com>
|
||||
Chance Zibolski <chance.zibolski@gmail.com>
|
||||
@@ -375,6 +379,7 @@ Chen Qiu <cheney-90@hotmail.com>
|
||||
Cheng-mean Liu <soccerl@microsoft.com>
|
||||
Chengfei Shang <cfshang@alauda.io>
|
||||
Chengguang Xu <cgxu519@gmx.com>
|
||||
Chengyu Zhu <hudson@cyzhu.com>
|
||||
Chentianze <cmoman@126.com>
|
||||
Chenyang Yan <memory.yancy@gmail.com>
|
||||
chenyuzhu <chenyuzhi@oschina.cn>
|
||||
@@ -1207,6 +1212,7 @@ K. Heller <pestophagous@gmail.com>
|
||||
Kai Blin <kai@samba.org>
|
||||
Kai Qiang Wu (Kennan) <wkq5325@gmail.com>
|
||||
Kaijie Chen <chen@kaijie.org>
|
||||
Kaita Nakamura <kaita.nakamura0830@gmail.com>
|
||||
Kamil Domański <kamil@domanski.co>
|
||||
Kamjar Gerami <kami.gerami@gmail.com>
|
||||
Kanstantsin Shautsou <kanstantsin.sha@gmail.com>
|
||||
@@ -1281,6 +1287,7 @@ Krasi Georgiev <krasi@vip-consult.solutions>
|
||||
Krasimir Georgiev <support@vip-consult.co.uk>
|
||||
Kris-Mikael Krister <krismikael@protonmail.com>
|
||||
Kristian Haugene <kristian.haugene@capgemini.com>
|
||||
Kristian Heljas <kristian@kristian.ee>
|
||||
Kristina Zabunova <triara.xiii@gmail.com>
|
||||
Krystian Wojcicki <kwojcicki@sympatico.ca>
|
||||
Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp>
|
||||
@@ -1482,6 +1489,7 @@ Matthias Kühnle <git.nivoc@neverbox.com>
|
||||
Matthias Rampke <mr@soundcloud.com>
|
||||
Matthieu Fronton <m@tthieu.fr>
|
||||
Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
|
||||
Matthieu MOREL <matthieu.morel35@gmail.com>
|
||||
Mattias Jernberg <nostrad@gmail.com>
|
||||
Mauricio Garavaglia <mauricio@medallia.com>
|
||||
mauriyouth <mauriyouth@gmail.com>
|
||||
@@ -1712,6 +1720,7 @@ Patrick Hemmer <patrick.hemmer@gmail.com>
|
||||
Patrick St. laurent <patrick@saint-laurent.us>
|
||||
Patrick Stapleton <github@gdi2290.com>
|
||||
Patrik Cyvoct <patrik@ptrk.io>
|
||||
Patrik Leifert <patrikleifert@hotmail.com>
|
||||
pattichen <craftsbear@gmail.com>
|
||||
Paul "TBBle" Hampson <Paul.Hampson@Pobox.com>
|
||||
Paul <paul9869@gmail.com>
|
||||
@@ -1870,6 +1879,7 @@ Robert Obryk <robryk@gmail.com>
|
||||
Robert Schneider <mail@shakeme.info>
|
||||
Robert Shade <robert.shade@gmail.com>
|
||||
Robert Stern <lexandro2000@gmail.com>
|
||||
Robert Sturla <robertsturla@outlook.com>
|
||||
Robert Terhaar <rterhaar@atlanticdynamic.com>
|
||||
Robert Wallis <smilingrob@gmail.com>
|
||||
Robert Wang <robert@arctic.tw>
|
||||
|
2 src/cmd/linuxkit/vendor/github.com/docker/docker/api/common.go generated vendored
@@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api"
|
||||
// Common constants for daemon and client.
|
||||
const (
|
||||
// DefaultVersion of the current REST API.
|
||||
DefaultVersion = "1.48"
|
||||
DefaultVersion = "1.50"
|
||||
|
||||
// MinSupportedAPIVersion is the minimum API version that can be supported
|
||||
// by the API server, specified as "major.minor". Note that the daemon
|
||||
|
1069 src/cmd/linuxkit/vendor/github.com/docker/docker/api/swagger.yaml generated vendored
File diff suppressed because it is too large
91 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/build/build.go generated vendored Normal file
@@ -0,0 +1,91 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
)
|
||||
|
||||
// BuilderVersion sets the version of underlying builder to use
|
||||
type BuilderVersion string
|
||||
|
||||
const (
|
||||
// BuilderV1 is the first generation builder in docker daemon
|
||||
BuilderV1 BuilderVersion = "1"
|
||||
// BuilderBuildKit is builder based on moby/buildkit project
|
||||
BuilderBuildKit BuilderVersion = "2"
|
||||
)
|
||||
|
||||
// Result contains the image id of a successful build.
|
||||
type Result struct {
|
||||
ID string
|
||||
}
|
||||
|
||||
// ImageBuildOptions holds the information
|
||||
// necessary to build images.
|
||||
type ImageBuildOptions struct {
|
||||
Tags []string
|
||||
SuppressOutput bool
|
||||
RemoteContext string
|
||||
NoCache bool
|
||||
Remove bool
|
||||
ForceRemove bool
|
||||
PullParent bool
|
||||
Isolation container.Isolation
|
||||
CPUSetCPUs string
|
||||
CPUSetMems string
|
||||
CPUShares int64
|
||||
CPUQuota int64
|
||||
CPUPeriod int64
|
||||
Memory int64
|
||||
MemorySwap int64
|
||||
CgroupParent string
|
||||
NetworkMode string
|
||||
ShmSize int64
|
||||
Dockerfile string
|
||||
Ulimits []*container.Ulimit
|
||||
// BuildArgs needs to be a *string instead of just a string so that
|
||||
// we can tell the difference between "" (empty string) and no value
|
||||
// at all (nil). See the parsing of buildArgs in
|
||||
// api/server/router/build/build_routes.go for even more info.
|
||||
BuildArgs map[string]*string
|
||||
AuthConfigs map[string]registry.AuthConfig
|
||||
Context io.Reader
|
||||
Labels map[string]string
|
||||
// squash the resulting image's layers to the parent
|
||||
// preserves the original image and creates a new one from the parent with all
|
||||
// the changes applied to a single layer
|
||||
Squash bool
|
||||
// CacheFrom specifies images that are used for matching cache. Images
|
||||
// specified here do not need to have a valid parent chain to match cache.
|
||||
CacheFrom []string
|
||||
SecurityOpt []string
|
||||
ExtraHosts []string // List of extra hosts
|
||||
Target string
|
||||
SessionID string
|
||||
Platform string
|
||||
// Version specifies the version of the underlying builder to use
|
||||
Version BuilderVersion
|
||||
// BuildID is an optional identifier that can be passed together with the
|
||||
// build request. The same identifier can be used to gracefully cancel the
|
||||
// build with the cancel request.
|
||||
BuildID string
|
||||
// Outputs defines configurations for exporting build results. Only supported
|
||||
// in BuildKit mode
|
||||
Outputs []ImageBuildOutput
|
||||
}
|
||||
|
||||
// ImageBuildOutput defines configuration for exporting a build result
|
||||
type ImageBuildOutput struct {
|
||||
Type string
|
||||
Attrs map[string]string
|
||||
}
|
||||
|
||||
// ImageBuildResponse holds information
|
||||
// returned by a server after building
|
||||
// an image.
|
||||
type ImageBuildResponse struct {
|
||||
Body io.ReadCloser
|
||||
OSType string
|
||||
}
|
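The BuildArgs comment above explains why the map value is *string: a nil pointer means an argument was declared with no value at all, which is distinct from a pointer to an explicit empty string. A small sketch of populating the relocated build.ImageBuildOptions (values are illustrative):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/build"
)

func main() {
	version := "1.2.3"
	opts := build.ImageBuildOptions{
		Tags:       []string{"example/app:latest"},
		Dockerfile: "Dockerfile",
		Version:    build.BuilderBuildKit, // select the BuildKit builder
		BuildArgs: map[string]*string{
			"APP_VERSION": &version, // explicit value
			"HTTP_PROXY":  nil,      // declared, but no value supplied (distinct from "")
		},
	}

	fmt.Println(*opts.BuildArgs["APP_VERSION"])      // 1.2.3
	fmt.Println(opts.BuildArgs["HTTP_PROXY"] == nil) // true
}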
52 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/build/cache.go generated vendored Normal file
@@ -0,0 +1,52 @@
|
||||
package build
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
)
|
||||
|
||||
// CacheRecord contains information about a build cache record.
|
||||
type CacheRecord struct {
|
||||
// ID is the unique ID of the build cache record.
|
||||
ID string
|
||||
// Parent is the ID of the parent build cache record.
|
||||
//
|
||||
// Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead.
|
||||
Parent string `json:"Parent,omitempty"`
|
||||
// Parents is the list of parent build cache record IDs.
|
||||
Parents []string `json:"Parents,omitempty"`
|
||||
// Type is the cache record type.
|
||||
Type string
|
||||
// Description is a description of the build-step that produced the build cache.
|
||||
Description string
|
||||
// InUse indicates if the build cache is in use.
|
||||
InUse bool
|
||||
// Shared indicates if the build cache is shared.
|
||||
Shared bool
|
||||
// Size is the amount of disk space used by the build cache (in bytes).
|
||||
Size int64
|
||||
// CreatedAt is the date and time at which the build cache was created.
|
||||
CreatedAt time.Time
|
||||
// LastUsedAt is the date and time at which the build cache was last used.
|
||||
LastUsedAt *time.Time
|
||||
UsageCount int
|
||||
}
|
||||
|
||||
// CachePruneOptions hold parameters to prune the build cache.
|
||||
type CachePruneOptions struct {
|
||||
All bool
|
||||
ReservedSpace int64
|
||||
MaxUsedSpace int64
|
||||
MinFreeSpace int64
|
||||
Filters filters.Args
|
||||
|
||||
KeepStorage int64 // Deprecated: deprecated in API 1.48.
|
||||
}
|
||||
|
||||
// CachePruneReport contains the response for Engine API:
|
||||
// POST "/build/prune"
|
||||
type CachePruneReport struct {
|
||||
CachesDeleted []string
|
||||
SpaceReclaimed uint64
|
||||
}
|
8 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/build/disk_usage.go generated vendored Normal file
@@ -0,0 +1,8 @@
|
||||
package build
|
||||
|
||||
// CacheDiskUsage contains disk usage for the build cache.
|
||||
type CacheDiskUsage struct {
|
||||
TotalSize int64
|
||||
Reclaimable int64
|
||||
Items []*CacheRecord
|
||||
}
|
171 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/client.go generated vendored
@@ -3,12 +3,7 @@ package types // import "github.com/docker/docker/api/types"
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"io"
|
||||
"net"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/registry"
|
||||
)
|
||||
|
||||
// NewHijackedResponse initializes a [HijackedResponse] type.
|
||||
@@ -51,165 +46,6 @@ func (h *HijackedResponse) CloseWrite() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// ImageBuildOptions holds the information
|
||||
// necessary to build images.
|
||||
type ImageBuildOptions struct {
|
||||
Tags []string
|
||||
SuppressOutput bool
|
||||
RemoteContext string
|
||||
NoCache bool
|
||||
Remove bool
|
||||
ForceRemove bool
|
||||
PullParent bool
|
||||
Isolation container.Isolation
|
||||
CPUSetCPUs string
|
||||
CPUSetMems string
|
||||
CPUShares int64
|
||||
CPUQuota int64
|
||||
CPUPeriod int64
|
||||
Memory int64
|
||||
MemorySwap int64
|
||||
CgroupParent string
|
||||
NetworkMode string
|
||||
ShmSize int64
|
||||
Dockerfile string
|
||||
Ulimits []*container.Ulimit
|
||||
// BuildArgs needs to be a *string instead of just a string so that
|
||||
// we can tell the difference between "" (empty string) and no value
|
||||
// at all (nil). See the parsing of buildArgs in
|
||||
// api/server/router/build/build_routes.go for even more info.
|
||||
BuildArgs map[string]*string
|
||||
AuthConfigs map[string]registry.AuthConfig
|
||||
Context io.Reader
|
||||
Labels map[string]string
|
||||
// squash the resulting image's layers to the parent
|
||||
// preserves the original image and creates a new one from the parent with all
|
||||
// the changes applied to a single layer
|
||||
Squash bool
|
||||
// CacheFrom specifies images that are used for matching cache. Images
|
||||
// specified here do not need to have a valid parent chain to match cache.
|
||||
CacheFrom []string
|
||||
SecurityOpt []string
|
||||
ExtraHosts []string // List of extra hosts
|
||||
Target string
|
||||
SessionID string
|
||||
Platform string
|
||||
// Version specifies the version of the underlying builder to use
|
||||
Version BuilderVersion
|
||||
// BuildID is an optional identifier that can be passed together with the
|
||||
// build request. The same identifier can be used to gracefully cancel the
|
||||
// build with the cancel request.
|
||||
BuildID string
|
||||
// Outputs defines configurations for exporting build results. Only supported
|
||||
// in BuildKit mode
|
||||
Outputs []ImageBuildOutput
|
||||
}
|
||||
|
||||
// ImageBuildOutput defines configuration for exporting a build result
|
||||
type ImageBuildOutput struct {
|
||||
Type string
|
||||
Attrs map[string]string
|
||||
}
|
||||
|
||||
// BuilderVersion sets the version of underlying builder to use
|
||||
type BuilderVersion string
|
||||
|
||||
const (
|
||||
// BuilderV1 is the first generation builder in docker daemon
|
||||
BuilderV1 BuilderVersion = "1"
|
||||
// BuilderBuildKit is builder based on moby/buildkit project
|
||||
BuilderBuildKit BuilderVersion = "2"
|
||||
)
|
||||
|
||||
// ImageBuildResponse holds information
|
||||
// returned by a server after building
|
||||
// an image.
|
||||
type ImageBuildResponse struct {
|
||||
Body io.ReadCloser
|
||||
OSType string
|
||||
}
|
||||
|
||||
// NodeListOptions holds parameters to list nodes with.
|
||||
type NodeListOptions struct {
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// NodeRemoveOptions holds parameters to remove nodes with.
|
||||
type NodeRemoveOptions struct {
|
||||
Force bool
|
||||
}
|
||||
|
||||
// ServiceCreateOptions contains the options to use when creating a service.
|
||||
type ServiceCreateOptions struct {
|
||||
// EncodedRegistryAuth is the encoded registry authorization credentials to
|
||||
// use when updating the service.
|
||||
//
|
||||
// This field follows the format of the X-Registry-Auth header.
|
||||
EncodedRegistryAuth string
|
||||
|
||||
// QueryRegistry indicates whether the service update requires
|
||||
// contacting a registry. A registry may be contacted to retrieve
|
||||
// the image digest and manifest, which in turn can be used to update
|
||||
// platform or other information about the service.
|
||||
QueryRegistry bool
|
||||
}
|
||||
|
||||
// Values for RegistryAuthFrom in ServiceUpdateOptions
|
||||
const (
|
||||
RegistryAuthFromSpec = "spec"
|
||||
RegistryAuthFromPreviousSpec = "previous-spec"
|
||||
)
|
||||
|
||||
// ServiceUpdateOptions contains the options to be used for updating services.
|
||||
type ServiceUpdateOptions struct {
|
||||
// EncodedRegistryAuth is the encoded registry authorization credentials to
|
||||
// use when updating the service.
|
||||
//
|
||||
// This field follows the format of the X-Registry-Auth header.
|
||||
EncodedRegistryAuth string
|
||||
|
||||
// TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
|
||||
// into this field. While it does open API users up to racy writes, most
|
||||
// users may not need that level of consistency in practice.
|
||||
|
||||
// RegistryAuthFrom specifies where to find the registry authorization
|
||||
// credentials if they are not given in EncodedRegistryAuth. Valid
|
||||
// values are "spec" and "previous-spec".
|
||||
RegistryAuthFrom string
|
||||
|
||||
// Rollback indicates whether a server-side rollback should be
|
||||
// performed. When this is set, the provided spec will be ignored.
|
||||
// The valid values are "previous" and "none". An empty value is the
|
||||
// same as "none".
|
||||
Rollback string
|
||||
|
||||
// QueryRegistry indicates whether the service update requires
|
||||
// contacting a registry. A registry may be contacted to retrieve
|
||||
// the image digest and manifest, which in turn can be used to update
|
||||
// platform or other information about the service.
|
||||
QueryRegistry bool
|
||||
}
|
||||
|
||||
// ServiceListOptions holds parameters to list services with.
|
||||
type ServiceListOptions struct {
|
||||
Filters filters.Args
|
||||
|
||||
// Status indicates whether the server should include the service task
|
||||
// count of running and desired tasks.
|
||||
Status bool
|
||||
}
|
||||
|
||||
// ServiceInspectOptions holds parameters related to the "service inspect"
|
||||
// operation.
|
||||
type ServiceInspectOptions struct {
|
||||
InsertDefaults bool
|
||||
}
|
||||
|
||||
// TaskListOptions holds parameters to list tasks with.
|
||||
type TaskListOptions struct {
|
||||
Filters filters.Args
|
||||
}
|
||||
|
||||
// PluginRemoveOptions holds parameters to remove plugins.
|
||||
type PluginRemoveOptions struct {
|
||||
Force bool
|
||||
@@ -243,13 +79,6 @@ type PluginInstallOptions struct {
|
||||
Args []string
|
||||
}
|
||||
|
||||
// SwarmUnlockKeyResponse contains the response for Engine API:
|
||||
// GET /swarm/unlockkey
|
||||
type SwarmUnlockKeyResponse struct {
|
||||
// UnlockKey is the unlock key in ASCII-armored format.
|
||||
UnlockKey string
|
||||
}
|
||||
|
||||
// PluginCreateOptions hold all options to plugin create.
|
||||
type PluginCreateOptions struct {
|
||||
RepoName string
|
||||
|
4 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container.go generated vendored
@@ -104,7 +104,7 @@ type MountPoint struct {
|
||||
// State stores container's running state
|
||||
// it's part of ContainerJSONBase and returned by "inspect" command
|
||||
type State struct {
|
||||
Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
|
||||
Status ContainerState // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead"
|
||||
Running bool
|
||||
Paused bool
|
||||
Restarting bool
|
||||
@@ -132,7 +132,7 @@ type Summary struct {
|
||||
SizeRw int64 `json:",omitempty"`
|
||||
SizeRootFs int64 `json:",omitempty"`
|
||||
Labels map[string]string
|
||||
State string
|
||||
State ContainerState
|
||||
Status string
|
||||
HostConfig struct {
|
||||
NetworkMode string `json:",omitempty"`
|
||||
|
8 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/disk_usage.go generated vendored Normal file
@@ -0,0 +1,8 @@
|
||||
package container
|
||||
|
||||
// DiskUsage contains disk usage for containers.
|
||||
type DiskUsage struct {
|
||||
TotalSize int64
|
||||
Reclaimable int64
|
||||
Items []*Summary
|
||||
}
|
36 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/health.go generated vendored
@@ -1,18 +1,27 @@
|
||||
package container
|
||||
|
||||
import "time"
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HealthStatus is a string representation of the container's health.
|
||||
//
|
||||
// It currently is an alias for string, but may become a distinct type in future.
|
||||
type HealthStatus = string
|
||||
|
||||
// Health states
|
||||
const (
|
||||
NoHealthcheck = "none" // Indicates there is no healthcheck
|
||||
Starting = "starting" // Starting indicates that the container is not yet ready
|
||||
Healthy = "healthy" // Healthy indicates that the container is running correctly
|
||||
Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
|
||||
NoHealthcheck HealthStatus = "none" // Indicates there is no healthcheck
|
||||
Starting HealthStatus = "starting" // Starting indicates that the container is not yet ready
|
||||
Healthy HealthStatus = "healthy" // Healthy indicates that the container is running correctly
|
||||
Unhealthy HealthStatus = "unhealthy" // Unhealthy indicates that the container has a problem
|
||||
)
|
||||
|
||||
// Health stores information about the container's healthcheck results
|
||||
type Health struct {
|
||||
Status string // Status is one of [Starting], [Healthy] or [Unhealthy].
|
||||
Status HealthStatus // Status is one of [Starting], [Healthy] or [Unhealthy].
|
||||
FailingStreak int // FailingStreak is the number of consecutive failures
|
||||
Log []*HealthcheckResult // Log contains the last few results (oldest first)
|
||||
}
|
||||
@@ -24,3 +33,18 @@ type HealthcheckResult struct {
|
||||
ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
|
||||
Output string // Output from last check
|
||||
}
|
||||
|
||||
var validHealths = []string{
|
||||
NoHealthcheck, Starting, Healthy, Unhealthy,
|
||||
}
|
||||
|
||||
// ValidateHealthStatus checks if the provided string is a valid
|
||||
// container [HealthStatus].
|
||||
func ValidateHealthStatus(s HealthStatus) error {
|
||||
switch s {
|
||||
case NoHealthcheck, Starting, Healthy, Unhealthy:
|
||||
return nil
|
||||
default:
|
||||
return errInvalidParameter{error: fmt.Errorf("invalid value for health (%s): must be one of %s", s, strings.Join(validHealths, ", "))}
|
||||
}
|
||||
}
|
||||
|
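ValidateHealthStatus above accepts only the four declared health states. A tiny usage sketch:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// A declared health state passes validation.
	fmt.Println(container.ValidateHealthStatus(container.Healthy)) // <nil>

	// Anything else is rejected with an invalid-parameter error.
	if err := container.ValidateHealthStatus("sort-of-ok"); err != nil {
		fmt.Println(err)
	}
}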
4 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/hostconfig.go generated vendored
@@ -145,7 +145,7 @@ func (n NetworkMode) IsDefault() bool {
|
||||
|
||||
// IsPrivate indicates whether container uses its private network stack.
|
||||
func (n NetworkMode) IsPrivate() bool {
|
||||
return !(n.IsHost() || n.IsContainer())
|
||||
return !n.IsHost() && !n.IsContainer()
|
||||
}
|
||||
|
||||
// IsContainer indicates whether container uses a container network stack.
|
||||
@@ -230,7 +230,7 @@ type PidMode string
|
||||
|
||||
// IsPrivate indicates whether the container uses its own new pid namespace.
|
||||
func (n PidMode) IsPrivate() bool {
|
||||
return !(n.IsHost() || n.IsContainer())
|
||||
return !n.IsHost() && !n.IsContainer()
|
||||
}
|
||||
|
||||
// IsHost indicates whether the container uses the host's pid namespace.
|
||||
|
64 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/state.go generated vendored Normal file
@@ -0,0 +1,64 @@
|
||||
package container
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ContainerState is a string representation of the container's current state.
|
||||
//
|
||||
// It currently is an alias for string, but may become a distinct type in the future.
|
||||
type ContainerState = string
|
||||
|
||||
const (
|
||||
StateCreated ContainerState = "created" // StateCreated indicates the container is created, but not (yet) started.
|
||||
StateRunning ContainerState = "running" // StateRunning indicates that the container is running.
|
||||
StatePaused ContainerState = "paused" // StatePaused indicates that the container's current state is paused.
|
||||
StateRestarting ContainerState = "restarting" // StateRestarting indicates that the container is currently restarting.
|
||||
StateRemoving ContainerState = "removing" // StateRemoving indicates that the container is being removed.
|
||||
StateExited ContainerState = "exited" // StateExited indicates that the container exited.
|
||||
StateDead ContainerState = "dead" // StateDead indicates that the container failed to be deleted. Containers in this state are attempted to be cleaned up when the daemon restarts.
|
||||
)
|
||||
|
||||
var validStates = []ContainerState{
|
||||
StateCreated, StateRunning, StatePaused, StateRestarting, StateRemoving, StateExited, StateDead,
|
||||
}
|
||||
|
||||
// ValidateContainerState checks if the provided string is a valid
|
||||
// container [ContainerState].
|
||||
func ValidateContainerState(s ContainerState) error {
|
||||
switch s {
|
||||
case StateCreated, StateRunning, StatePaused, StateRestarting, StateRemoving, StateExited, StateDead:
|
||||
return nil
|
||||
default:
|
||||
return errInvalidParameter{error: fmt.Errorf("invalid value for state (%s): must be one of %s", s, strings.Join(validStates, ", "))}
|
||||
}
|
||||
}
|
||||
|
||||
// StateStatus is used to return container wait results.
|
||||
// Implements exec.ExitCode interface.
|
||||
// This type is needed as State include a sync.Mutex field which make
|
||||
// copying it unsafe.
|
||||
type StateStatus struct {
|
||||
exitCode int
|
||||
err error
|
||||
}
|
||||
|
||||
// ExitCode returns current exitcode for the state.
|
||||
func (s StateStatus) ExitCode() int {
|
||||
return s.exitCode
|
||||
}
|
||||
|
||||
// Err returns current error for the state. Returns nil if the container had
|
||||
// exited on its own.
|
||||
func (s StateStatus) Err() error {
|
||||
return s.err
|
||||
}
|
||||
|
||||
// NewStateStatus returns a new StateStatus with the given exit code and error.
|
||||
func NewStateStatus(exitCode int, err error) StateStatus {
|
||||
return StateStatus{
|
||||
exitCode: exitCode,
|
||||
err: err,
|
||||
}
|
||||
}
|
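Editor's note: a minimal sketch (not part of the diff) of the new ContainerState validator and StateStatus helper, assuming the vendored container package:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	if err := container.ValidateContainerState(container.StateRunning); err != nil {
		fmt.Println("unexpected:", err)
	}
	// "sleeping" is not one of the defined states, so validation fails.
	if err := container.ValidateContainerState("sleeping"); err != nil {
		fmt.Println("rejected:", err)
	}

	// StateStatus carries a wait result without copying the mutex-bearing State.
	st := container.NewStateStatus(137, nil)
	fmt.Println(st.ExitCode(), st.Err()) // 137 <nil>
}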
8 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/image/disk_usage.go generated vendored Normal file
@@ -0,0 +1,8 @@
package image

// DiskUsage contains disk usage for images.
type DiskUsage struct {
TotalSize int64
Reclaimable int64
Items []*Summary
}
8 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/image/image_inspect.go generated vendored
@@ -3,6 +3,7 @@ package image
import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/storage"
dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

@@ -84,7 +85,7 @@ type InspectResponse struct {
// Author is the name of the author that was specified when committing the
// image, or as specified through MAINTAINER (deprecated) in the Dockerfile.
Author string
Config *container.Config
Config *dockerspec.DockerOCIImageConfig

// Architecture is the hardware CPU architecture that the image runs on.
Architecture string
@@ -128,11 +129,12 @@ type InspectResponse struct {
// compatibility.
Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"`

// Manifests is a list of image manifests available in this image. It
// Manifests is a list of image manifests available in this image. It
// provides a more detailed view of the platform-specific image manifests or
// other image-attached data like build attestations.
//
// Only available if the daemon provides a multi-platform image store.
// Only available if the daemon provides a multi-platform image store, the client
// requests manifests AND does not request a specific platform.
//
// WARNING: This is experimental and may change at any time without any backward
// compatibility.
6 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/image/opts.go generated vendored
@@ -83,6 +83,7 @@ type ListOptions struct {

// RemoveOptions holds parameters to remove images.
type RemoveOptions struct {
Platforms []ocispec.Platform
Force bool
PruneChildren bool
}

@@ -106,6 +107,11 @@ type LoadOptions struct {
type InspectOptions struct {
// Manifests returns the image manifests.
Manifests bool

// Platform selects the specific platform of a multi-platform image to inspect.
//
// This option is only available for API version 1.49 and up.
Platform *ocispec.Platform
}

// SaveOptions holds parameters to save images.
44 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/registry/registry.go generated vendored
@@ -1,3 +1,6 @@
// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16:
//go:build go1.23

package registry // import "github.com/docker/docker/api/types/registry"

import (
@@ -15,23 +18,26 @@ type ServiceConfig struct {
InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
Mirrors []string

// ExtraFields is for internal use to include deprecated fields on older API versions.
ExtraFields map[string]any `json:"-"`
}

// MarshalJSON implements a custom marshaler to include legacy fields
// in API responses.
func (sc ServiceConfig) MarshalJSON() ([]byte, error) {
tmp := map[string]interface{}{
"InsecureRegistryCIDRs": sc.InsecureRegistryCIDRs,
"IndexConfigs": sc.IndexConfigs,
"Mirrors": sc.Mirrors,
func (sc *ServiceConfig) MarshalJSON() ([]byte, error) {
type tmp ServiceConfig
base, err := json.Marshal((*tmp)(sc))
if err != nil {
return nil, err
}
if sc.AllowNondistributableArtifactsCIDRs != nil {
tmp["AllowNondistributableArtifactsCIDRs"] = nil
var merged map[string]any
_ = json.Unmarshal(base, &merged)

for k, v := range sc.ExtraFields {
merged[k] = v
}
if sc.AllowNondistributableArtifactsHostnames != nil {
tmp["AllowNondistributableArtifactsHostnames"] = nil
}
return json.Marshal(tmp)
return json.Marshal(merged)
}

// NetIPNet is the net.IPNet type, which can be marshalled and
@@ -49,15 +55,17 @@ func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
}

// UnmarshalJSON sets the IPNet from a byte array of JSON
func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
func (ipnet *NetIPNet) UnmarshalJSON(b []byte) error {
var ipnetStr string
if err = json.Unmarshal(b, &ipnetStr); err == nil {
var cidr *net.IPNet
if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
*ipnet = NetIPNet(*cidr)
}
if err := json.Unmarshal(b, &ipnetStr); err != nil {
return err
}
return
_, cidr, err := net.ParseCIDR(ipnetStr)
if err != nil {
return err
}
*ipnet = NetIPNet(*cidr)
return nil
}

// IndexInfo contains information about a registry
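Editor's note: a minimal sketch (not part of the diff) of how the reworked ServiceConfig.MarshalJSON surfaces ExtraFields in the output, assuming the vendored registry package at this revision:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/registry"
)

func main() {
	sc := &registry.ServiceConfig{
		Mirrors: []string{"https://mirror.example.com"},
		// ExtraFields lets the daemon splice legacy fields into responses
		// for older API versions without keeping them on the struct itself.
		ExtraFields: map[string]any{
			"AllowNondistributableArtifactsCIDRs": nil,
		},
	}
	out, err := json.Marshal(sc) // pointer receiver, so marshal the pointer
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}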
24 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/config.go generated vendored
@@ -1,6 +1,10 @@
package swarm // import "github.com/docker/docker/api/types/swarm"

import "os"
import (
"os"

"github.com/docker/docker/api/types/filters"
)

// Config represents a config.
type Config struct {
@@ -12,6 +16,12 @@ type Config struct {
// ConfigSpec represents a config specification from a config in swarm
type ConfigSpec struct {
Annotations

// Data is the data to store as a config.
//
// The maximum allowed size is 1000KB, as defined in [MaxConfigSize].
//
// [MaxConfigSize]: https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize
Data []byte `json:",omitempty"`

// Templating controls whether and how to evaluate the config payload as
@@ -38,3 +48,15 @@ type ConfigReference struct {
ConfigID string
ConfigName string
}

// ConfigCreateResponse contains the information returned to a client
// on the creation of a new config.
type ConfigCreateResponse struct {
// ID is the id of the created config.
ID string
}

// ConfigListOptions holds parameters to list configs
type ConfigListOptions struct {
Filters filters.Args
}
11 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/node.go generated vendored
@@ -1,4 +1,5 @@
package swarm // import "github.com/docker/docker/api/types/swarm"
import "github.com/docker/docker/api/types/filters"

// Node represents a node.
type Node struct {
@@ -137,3 +138,13 @@ const (
type Topology struct {
Segments map[string]string `json:",omitempty"`
}

// NodeListOptions holds parameters to list nodes with.
type NodeListOptions struct {
Filters filters.Args
}

// NodeRemoveOptions holds parameters to remove nodes with.
type NodeRemoveOptions struct {
Force bool
}
36 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/secret.go generated vendored
@@ -1,6 +1,10 @@
package swarm // import "github.com/docker/docker/api/types/swarm"

import "os"
import (
"os"

"github.com/docker/docker/api/types/filters"
)

// Secret represents a secret.
type Secret struct {
@@ -12,8 +16,22 @@ type Secret struct {
// SecretSpec represents a secret specification from a secret in swarm
type SecretSpec struct {
Annotations
Data []byte `json:",omitempty"`
Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store

// Data is the data to store as a secret. It must be empty if a
// [Driver] is used, in which case the data is loaded from an external
// secret store. The maximum allowed size is 500KB, as defined in
// [MaxSecretSize].
//
// This field is only used to create the secret, and is not returned
// by other endpoints.
//
// [MaxSecretSize]: https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize
Data []byte `json:",omitempty"`

// Driver is the name of the secrets driver used to fetch the secret's
// value from an external secret store. If not set, the default built-in
// store is used.
Driver *Driver `json:",omitempty"`

// Templating controls whether and how to evaluate the secret payload as
// a template. If it is not set, no templating is used.
@@ -34,3 +52,15 @@ type SecretReference struct {
SecretID string
SecretName string
}

// SecretCreateResponse contains the information returned to a client
// on the creation of a new secret.
type SecretCreateResponse struct {
// ID is the id of the created secret.
ID string
}

// SecretListOptions holds parameters to list secrets
type SecretListOptions struct {
Filters filters.Args
}
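Editor's note: a minimal sketch (not part of the diff) of a SecretSpec built against the documented constraints, assuming the vendored swarm package:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	spec := swarm.SecretSpec{
		Annotations: swarm.Annotations{Name: "db-password"},
		// Data is only used on creation and must stay under MaxSecretSize (500KB).
		// It must be empty when an external Driver is configured instead.
		Data: []byte("s3cr3t"),
	}
	fmt.Println(spec.Name, len(spec.Data))
}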
72 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/service.go generated vendored
@@ -1,6 +1,10 @@
package swarm // import "github.com/docker/docker/api/types/swarm"

import "time"
import (
"time"

"github.com/docker/docker/api/types/filters"
)

// Service represents a service.
type Service struct {
@@ -200,3 +204,69 @@ type JobStatus struct {
// Swarm manager.
LastExecution time.Time `json:",omitempty"`
}

// ServiceCreateOptions contains the options to use when creating a service.
type ServiceCreateOptions struct {
// EncodedRegistryAuth is the encoded registry authorization credentials to
// use when updating the service.
//
// This field follows the format of the X-Registry-Auth header.
EncodedRegistryAuth string

// QueryRegistry indicates whether the service update requires
// contacting a registry. A registry may be contacted to retrieve
// the image digest and manifest, which in turn can be used to update
// platform or other information about the service.
QueryRegistry bool
}

// Values for RegistryAuthFrom in ServiceUpdateOptions
const (
RegistryAuthFromSpec = "spec"
RegistryAuthFromPreviousSpec = "previous-spec"
)

// ServiceUpdateOptions contains the options to be used for updating services.
type ServiceUpdateOptions struct {
// EncodedRegistryAuth is the encoded registry authorization credentials to
// use when updating the service.
//
// This field follows the format of the X-Registry-Auth header.
EncodedRegistryAuth string

// TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
// into this field. While it does open API users up to racy writes, most
// users may not need that level of consistency in practice.

// RegistryAuthFrom specifies where to find the registry authorization
// credentials if they are not given in EncodedRegistryAuth. Valid
// values are "spec" and "previous-spec".
RegistryAuthFrom string

// Rollback indicates whether a server-side rollback should be
// performed. When this is set, the provided spec will be ignored.
// The valid values are "previous" and "none". An empty value is the
// same as "none".
Rollback string

// QueryRegistry indicates whether the service update requires
// contacting a registry. A registry may be contacted to retrieve
// the image digest and manifest, which in turn can be used to update
// platform or other information about the service.
QueryRegistry bool
}

// ServiceListOptions holds parameters to list services with.
type ServiceListOptions struct {
Filters filters.Args

// Status indicates whether the server should include the service task
// count of running and desired tasks.
Status bool
}

// ServiceInspectOptions holds parameters related to the "service inspect"
// operation.
type ServiceInspectOptions struct {
InsertDefaults bool
}
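Editor's note: a minimal sketch (not part of the diff) of the relocated service-update options, assuming the vendored swarm package:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	opts := swarm.ServiceUpdateOptions{
		// Reuse the credentials stored in the previous service spec rather
		// than resending X-Registry-Auth data with the update.
		RegistryAuthFrom: swarm.RegistryAuthFromPreviousSpec,
		QueryRegistry:    true,
	}
	fmt.Printf("%+v\n", opts)
}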
7 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/swarm.go generated vendored
@@ -235,3 +235,10 @@ type UpdateFlags struct {
RotateManagerToken bool
RotateManagerUnlockKey bool
}

// UnlockKeyResponse contains the response for Engine API:
// GET /swarm/unlockkey
type UnlockKeyResponse struct {
// UnlockKey is the unlock key in ASCII-armored format.
UnlockKey string
}
6 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/task.go generated vendored
@@ -3,6 +3,7 @@ package swarm // import "github.com/docker/docker/api/types/swarm"
import (
"time"

"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm/runtime"
)

@@ -223,3 +224,8 @@ type VolumeAttachment struct {
// in the ContainerSpec, that this volume fulfills.
Target string `json:",omitempty"`
}

// TaskListOptions holds parameters to list tasks with.
type TaskListOptions struct {
Filters filters.Args
}
17 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/system/disk_usage.go generated vendored Normal file
@@ -0,0 +1,17 @@
package system

import (
"github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/volume"
)

// DiskUsage contains response of Engine API for API 1.49 and greater:
// GET "/system/df"
type DiskUsage struct {
Images *image.DiskUsage
Containers *container.DiskUsage
Volumes *volume.DiskUsage
BuildCache *build.CacheDiskUsage
}
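Editor's note: a minimal sketch (not part of the diff) of consuming the new nested /system/df response type, assuming the vendored system, image, and volume packages:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/system"
)

// totalReclaimable sums the per-category reclaimable bytes of the nested
// DiskUsage response; categories the daemon did not report are skipped.
func totalReclaimable(du system.DiskUsage) int64 {
	var total int64
	if du.Images != nil {
		total += du.Images.Reclaimable
	}
	if du.Volumes != nil {
		total += du.Volumes.Reclaimable
	}
	return total
}

func main() {
	fmt.Println(totalReclaimable(system.DiskUsage{})) // 0 for an empty response
}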
23 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/system/info.go generated vendored
@@ -29,8 +29,6 @@ type Info struct {
CPUSet bool
PidsLimit bool
IPv4Forwarding bool
BridgeNfIptables bool `json:"BridgeNfIptables"` // Deprecated: netfilter module is now loaded on-demand and no longer during daemon startup, making this field obsolete. This field is always false and will be removed in the next release.
BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` // Deprecated: netfilter module is now loaded on-demand and no longer during daemon startup, making this field obsolete. This field is always false and will be removed in the next release.
Debug bool
NFd int
OomKillDisable bool
@@ -73,7 +71,9 @@ type Info struct {
SecurityOptions []string
ProductLicense string `json:",omitempty"`
DefaultAddressPools []NetworkAddressPool `json:",omitempty"`
FirewallBackend *FirewallInfo `json:"FirewallBackend,omitempty"`
CDISpecDirs []string
DiscoveredDevices []DeviceInfo `json:",omitempty"`

Containerd *ContainerdInfo `json:",omitempty"`

@@ -143,7 +143,7 @@ type Commit struct {
// Expected is the commit ID of external tool expected by dockerd as set at build time.
//
// Deprecated: this field is no longer used in API v1.49, but kept for backward-compatibility with older API versions.
Expected string
Expected string `json:",omitempty"`
}

// NetworkAddressPool is a temp struct used by [Info] struct.
@@ -151,3 +151,20 @@ type NetworkAddressPool struct {
Base string
Size int
}

// FirewallInfo describes the firewall backend.
type FirewallInfo struct {
// Driver is the name of the firewall backend driver.
Driver string `json:"Driver"`
// Info is a list of label/value pairs, containing information related to the firewall.
Info [][2]string `json:"Info,omitempty"`
}

// DeviceInfo represents a discoverable device from a device driver.
type DeviceInfo struct {
// Source indicates the origin device driver.
Source string `json:"Source"`
// ID is the unique identifier for the device.
// Example: CDI FQDN like "vendor.com/gpu=0", or other driver-specific device ID
ID string `json:"ID"`
}
10 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/time/timestamp.go generated vendored
@@ -30,7 +30,7 @@ func GetTimestamp(value string, reference time.Time) (string, error) {

var format string
// if the string has a Z or a + or three dashes use parse otherwise use parseinlocation
parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
parseInLocation := !strings.ContainsAny(value, "zZ+") && strings.Count(value, "-") != 3

if strings.Contains(value, ".") {
if parseInLocation {
@@ -105,23 +105,23 @@ func GetTimestamp(value string, reference time.Time) (string, error) {
// since := time.Unix(seconds, nanoseconds)
//
// returns seconds as defaultSeconds if value == ""
func ParseTimestamps(value string, defaultSeconds int64) (seconds int64, nanoseconds int64, err error) {
func ParseTimestamps(value string, defaultSeconds int64) (seconds int64, nanoseconds int64, _ error) {
if value == "" {
return defaultSeconds, 0, nil
}
return parseTimestamp(value)
}

func parseTimestamp(value string) (sec int64, nsec int64, err error) {
func parseTimestamp(value string) (seconds int64, nanoseconds int64, _ error) {
s, n, ok := strings.Cut(value, ".")
sec, err = strconv.ParseInt(s, 10, 64)
sec, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return sec, 0, err
}
if !ok {
return sec, 0, nil
}
nsec, err = strconv.ParseInt(n, 10, 64)
nsec, err := strconv.ParseInt(n, 10, 64)
if err != nil {
return sec, nsec, err
}
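Editor's note: a minimal sketch (not part of the diff) exercising GetTimestamp and ParseTimestamps with the signatures shown above, assuming the vendored time package:

package main

import (
	"fmt"
	"time"

	timetypes "github.com/docker/docker/api/types/time"
)

func main() {
	ref := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)

	// Absolute and relative values are normalised to "seconds.nanoseconds".
	ts, err := timetypes.GetTimestamp("2024-01-01T00:00:00Z", ref)
	if err != nil {
		panic(err)
	}

	sec, nsec, err := timetypes.ParseTimestamps(ts, 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(time.Unix(sec, nsec).UTC())
}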
82 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/types.go generated vendored
@@ -1,10 +1,8 @@
package types // import "github.com/docker/docker/api/types"

import (
"time"

"github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/docker/api/types/volume"
@@ -24,7 +22,7 @@ type Ping struct {
APIVersion string
OSType string
Experimental bool
BuilderVersion BuilderVersion
BuilderVersion build.BuilderVersion

// SwarmStatus provides information about the current swarm status of the
// engine, obtained from the "Swarm" header in the API response.
@@ -91,41 +89,10 @@ type DiskUsage struct {
Images []*image.Summary
Containers []*container.Summary
Volumes []*volume.Volume
BuildCache []*BuildCache
BuildCache []*build.CacheRecord
BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40.
}

// BuildCachePruneReport contains the response for Engine API:
// POST "/build/prune"
type BuildCachePruneReport struct {
CachesDeleted []string
SpaceReclaimed uint64
}

// SecretCreateResponse contains the information returned to a client
// on the creation of a new secret.
type SecretCreateResponse struct {
// ID is the id of the created secret.
ID string
}

// SecretListOptions holds parameters to list secrets
type SecretListOptions struct {
Filters filters.Args
}

// ConfigCreateResponse contains the information returned to a client
// on the creation of a new config.
type ConfigCreateResponse struct {
// ID is the id of the created config.
ID string
}

// ConfigListOptions holds parameters to list configs
type ConfigListOptions struct {
Filters filters.Args
}

// PushResult contains the tag, manifest digest, and manifest size from the
// push. It's used to signal this information to the trust code in the client
// so it can sign the manifest if necessary.
@@ -134,46 +101,3 @@ type PushResult struct {
Digest string
Size int
}

// BuildResult contains the image id of a successful build
type BuildResult struct {
ID string
}

// BuildCache contains information about a build cache record.
type BuildCache struct {
// ID is the unique ID of the build cache record.
ID string
// Parent is the ID of the parent build cache record.
//
// Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead.
Parent string `json:"Parent,omitempty"`
// Parents is the list of parent build cache record IDs.
Parents []string `json:"Parents,omitempty"`
// Type is the cache record type.
Type string
// Description is a description of the build-step that produced the build cache.
Description string
// InUse indicates if the build cache is in use.
InUse bool
// Shared indicates if the build cache is shared.
Shared bool
// Size is the amount of disk space used by the build cache (in bytes).
Size int64
// CreatedAt is the date and time at which the build cache was created.
CreatedAt time.Time
// LastUsedAt is the date and time at which the build cache was last used.
LastUsedAt *time.Time
UsageCount int
}

// BuildCachePruneOptions hold parameters to prune the build cache
type BuildCachePruneOptions struct {
All bool
ReservedSpace int64
MaxUsedSpace int64
MinFreeSpace int64
Filters filters.Args

KeepStorage int64 // Deprecated: deprecated in API 1.48.
}
126 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/types_deprecated.go generated vendored
@@ -3,10 +3,12 @@ package types
import (
"context"

"github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/common"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/storage"
"github.com/docker/docker/api/types/swarm"
)

// IDResponse Response to an API call that returns just an Id.
@@ -113,3 +115,127 @@ type ImageInspect = image.InspectResponse
//
// Deprecated: moved to [github.com/docker/docker/api/types/registry.RequestAuthConfig].
type RequestPrivilegeFunc func(context.Context) (string, error)

// SecretCreateResponse contains the information returned to a client
// on the creation of a new secret.
//
// Deprecated: use [swarm.SecretCreateResponse].
type SecretCreateResponse = swarm.SecretCreateResponse

// SecretListOptions holds parameters to list secrets
//
// Deprecated: use [swarm.SecretListOptions].
type SecretListOptions = swarm.SecretListOptions

// ConfigCreateResponse contains the information returned to a client
// on the creation of a new config.
//
// Deprecated: use [swarm.ConfigCreateResponse].
type ConfigCreateResponse = swarm.ConfigCreateResponse

// ConfigListOptions holds parameters to list configs
//
// Deprecated: use [swarm.ConfigListOptions].
type ConfigListOptions = swarm.ConfigListOptions

// NodeListOptions holds parameters to list nodes with.
//
// Deprecated: use [swarm.NodeListOptions].
type NodeListOptions = swarm.NodeListOptions

// NodeRemoveOptions holds parameters to remove nodes with.
//
// Deprecated: use [swarm.NodeRemoveOptions].
type NodeRemoveOptions = swarm.NodeRemoveOptions

// TaskListOptions holds parameters to list tasks with.
//
// Deprecated: use [swarm.TaskListOptions].
type TaskListOptions = swarm.TaskListOptions

// ServiceCreateOptions contains the options to use when creating a service.
//
// Deprecated: use [swarm.ServiceCreateOptions].
type ServiceCreateOptions = swarm.ServiceCreateOptions

// ServiceUpdateOptions contains the options to be used for updating services.
//
// Deprecated: use [swarm.ServiceCreateOptions].
type ServiceUpdateOptions = swarm.ServiceUpdateOptions

const (
RegistryAuthFromSpec = swarm.RegistryAuthFromSpec // Deprecated: use [swarm.RegistryAuthFromSpec].
RegistryAuthFromPreviousSpec = swarm.RegistryAuthFromPreviousSpec // Deprecated: use [swarm.RegistryAuthFromPreviousSpec].
)

// ServiceListOptions holds parameters to list services with.
//
// Deprecated: use [swarm.ServiceListOptions].
type ServiceListOptions = swarm.ServiceListOptions

// ServiceInspectOptions holds parameters related to the "service inspect"
// operation.
//
// Deprecated: use [swarm.ServiceInspectOptions].
type ServiceInspectOptions = swarm.ServiceInspectOptions

// SwarmUnlockKeyResponse contains the response for Engine API:
// GET /swarm/unlockkey
//
// Deprecated: use [swarm.UnlockKeyResponse].
type SwarmUnlockKeyResponse = swarm.UnlockKeyResponse

// BuildCache contains information about a build cache record.
//
// Deprecated: deprecated in API 1.49. Use [build.CacheRecord] instead.
type BuildCache = build.CacheRecord

// BuildCachePruneOptions hold parameters to prune the build cache
//
// Deprecated: use [build.CachePruneOptions].
type BuildCachePruneOptions = build.CachePruneOptions

// BuildCachePruneReport contains the response for Engine API:
// POST "/build/prune"
//
// Deprecated: use [build.CachePruneReport].
type BuildCachePruneReport = build.CachePruneReport

// BuildResult contains the image id of a successful build/
//
// Deprecated: use [build.Result].
type BuildResult = build.Result

// ImageBuildOptions holds the information
// necessary to build images.
//
// Deprecated: use [build.ImageBuildOptions].
type ImageBuildOptions = build.ImageBuildOptions

// ImageBuildOutput defines configuration for exporting a build result
//
// Deprecated: use [build.ImageBuildOutput].
type ImageBuildOutput = build.ImageBuildOutput

// ImageBuildResponse holds information
// returned by a server after building
// an image.
//
// Deprecated: use [build.ImageBuildResponse].
type ImageBuildResponse = build.ImageBuildResponse

// BuilderVersion sets the version of underlying builder to use
//
// Deprecated: use [build.BuilderVersion].
type BuilderVersion = build.BuilderVersion

const (
// BuilderV1 is the first generation builder in docker daemon
//
// Deprecated: use [build.BuilderV1].
BuilderV1 = build.BuilderV1
// BuilderBuildKit is builder based on moby/buildkit project
//
// Deprecated: use [build.BuilderBuildKit].
BuilderBuildKit = build.BuilderBuildKit
)
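Editor's note: because the relocated names are kept as type aliases, existing callers keep compiling; a minimal sketch (not part of the diff) of the old and new spellings being interchangeable, assuming the vendored types and swarm packages:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
)

func main() {
	// types.SecretListOptions is now an alias for swarm.SecretListOptions,
	// so values of the deprecated type assign directly to the new one.
	var legacy types.SecretListOptions
	var current swarm.SecretListOptions = legacy
	fmt.Printf("%T\n", current)
}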
8 src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/volume/disk_usage.go generated vendored Normal file
@@ -0,0 +1,8 @@
package volume

// DiskUsage contains disk usage for volumes.
type DiskUsage struct {
TotalSize int64
Reclaimable int64
Items []*Volume
}
6 src/cmd/linuxkit/vendor/github.com/docker/docker/client/build_prune.go generated vendored
@@ -6,13 +6,13 @@ import (
"net/url"
"strconv"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/filters"
"github.com/pkg/errors"
)

// BuildCachePrune requests the daemon to delete unused cache data
func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) {
func (cli *Client) BuildCachePrune(ctx context.Context, opts build.CachePruneOptions) (*build.CachePruneReport, error) {
if err := cli.NewVersionError(ctx, "1.31", "build prune"); err != nil {
return nil, err
}
@@ -47,7 +47,7 @@ func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePru
return nil, err
}

report := types.BuildCachePruneReport{}
report := build.CachePruneReport{}
if err := json.NewDecoder(resp.Body).Decode(&report); err != nil {
return nil, errors.Wrap(err, "error retrieving disk usage")
}
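Editor's note: a minimal sketch (not part of the diff) of the updated client call, which now takes build.CachePruneOptions instead of types.BuildCachePruneOptions; it assumes a reachable Docker daemon and the vendored client and build packages:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/build"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Prune all unused build cache, not only dangling records.
	report, err := cli.BuildCachePrune(context.Background(), build.CachePruneOptions{All: true})
	if err != nil {
		panic(err)
	}
	fmt.Println("reclaimed bytes:", report.SpaceReclaimed)
}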
29 src/cmd/linuxkit/vendor/github.com/docker/docker/client/client_interfaces.go generated vendored
@@ -7,6 +7,7 @@ import (
"net/http"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/build"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/events"
"github.com/docker/docker/api/types/filters"
@@ -109,8 +110,8 @@ type DistributionAPIClient interface {

// ImageAPIClient defines API client methods for the images
type ImageAPIClient interface {
ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error)
ImageBuild(ctx context.Context, context io.Reader, options build.ImageBuildOptions) (build.ImageBuildResponse, error)
BuildCachePrune(ctx context.Context, opts build.CachePruneOptions) (*build.CachePruneReport, error)
BuildCancel(ctx context.Context, id string) error
ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error)
ImageImport(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error)
@@ -154,8 +155,8 @@ type NetworkAPIClient interface {
// NodeAPIClient defines API client methods for the nodes
type NodeAPIClient interface {
NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error)
NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error
NodeList(ctx context.Context, options swarm.NodeListOptions) ([]swarm.Node, error)
NodeRemove(ctx context.Context, nodeID string, options swarm.NodeRemoveOptions) error
NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error
}

@@ -175,22 +176,22 @@ type PluginAPIClient interface {

// ServiceAPIClient defines API client methods for the services
type ServiceAPIClient interface {
ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (swarm.ServiceCreateResponse, error)
ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error)
ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error)
ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options swarm.ServiceCreateOptions) (swarm.ServiceCreateResponse, error)
ServiceInspectWithRaw(ctx context.Context, serviceID string, options swarm.ServiceInspectOptions) (swarm.Service, []byte, error)
ServiceList(ctx context.Context, options swarm.ServiceListOptions) ([]swarm.Service, error)
ServiceRemove(ctx context.Context, serviceID string) error
ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error)
ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options swarm.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error)
ServiceLogs(ctx context.Context, serviceID string, options container.LogsOptions) (io.ReadCloser, error)
TaskLogs(ctx context.Context, taskID string, options container.LogsOptions) (io.ReadCloser, error)
TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error)
TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error)
TaskList(ctx context.Context, options swarm.TaskListOptions) ([]swarm.Task, error)
}

// SwarmAPIClient defines API client methods for the swarm
type SwarmAPIClient interface {
SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error)
SwarmJoin(ctx context.Context, req swarm.JoinRequest) error
SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error)
SwarmGetUnlockKey(ctx context.Context) (swarm.UnlockKeyResponse, error)
SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error
SwarmLeave(ctx context.Context, force bool) error
SwarmInspect(ctx context.Context) (swarm.Swarm, error)
@@ -219,8 +220,8 @@ type VolumeAPIClient interface {

// SecretAPIClient defines API client methods for secrets
type SecretAPIClient interface {
SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error)
SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error)
SecretList(ctx context.Context, options swarm.SecretListOptions) ([]swarm.Secret, error)
SecretCreate(ctx context.Context, secret swarm.SecretSpec) (swarm.SecretCreateResponse, error)
SecretRemove(ctx context.Context, id string) error
SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error)
SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error
@@ -228,8 +229,8 @@ type SecretAPIClient interface {

// ConfigAPIClient defines API client methods for configs
type ConfigAPIClient interface {
ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error)
ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error)
ConfigList(ctx context.Context, options swarm.ConfigListOptions) ([]swarm.Config, error)
ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (swarm.ConfigCreateResponse, error)
ConfigRemove(ctx context.Context, id string) error
ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error)
ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error
5 src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_create.go generated vendored
@@ -4,13 +4,12 @@ import (
"context"
"encoding/json"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/swarm"
)

// ConfigCreate creates a new config.
func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) {
var response types.ConfigCreateResponse
func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (swarm.ConfigCreateResponse, error) {
var response swarm.ConfigCreateResponse
if err := cli.NewVersionError(ctx, "1.30", "config create"); err != nil {
return response, err
}
3 src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_list.go generated vendored
@@ -5,13 +5,12 @@ import (
"encoding/json"
"net/url"

"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/swarm"
)

// ConfigList returns the list of configs.
func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) {
func (cli *Client) ConfigList(ctx context.Context, options swarm.ConfigListOptions) ([]swarm.Config, error) {
if err := cli.NewVersionError(ctx, "1.30", "config list"); err != nil {
return nil, err
}
2 src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_commit.go generated vendored
@@ -32,7 +32,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, containerID string, opti
if tagged, ok := ref.(reference.Tagged); ok {
tag = tagged.Tag()
}
repository = reference.FamiliarName(ref)
repository = ref.Name()
}

query := url.Values{}
14 src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_create.go generated vendored
@@ -3,6 +3,7 @@ package client // import "github.com/docker/docker/client"
import (
"context"
"encoding/json"
"errors"
"net/url"
"path"
"sort"
@@ -54,6 +55,19 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config
// When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize
hostConfig.ConsoleSize = [2]uint{0, 0}
}
if versions.LessThan(cli.ClientVersion(), "1.44") {
for _, m := range hostConfig.Mounts {
if m.BindOptions != nil {
// ReadOnlyNonRecursive can be safely ignored when API < 1.44
if m.BindOptions.ReadOnlyForceRecursive {
return response, errors.New("bind-recursive=readonly requires API v1.44 or later")
}
if m.BindOptions.NonRecursive && versions.LessThan(cli.ClientVersion(), "1.40") {
return response, errors.New("bind-recursive=disabled requires API v1.40 or later")
}
}
}
}

hostConfig.CapAdd = normalizeCapabilities(hostConfig.CapAdd)
hostConfig.CapDrop = normalizeCapabilities(hostConfig.CapDrop)
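Editor's note: a minimal sketch (not part of the diff) of the kind of host config the new client-side check gates; ReadOnlyForceRecursive is rejected before the request is sent when the negotiated API is older than 1.44. It assumes the vendored container and mount packages and does not call the daemon:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/mount"
)

func main() {
	hostConfig := &container.HostConfig{
		Mounts: []mount.Mount{{
			Type:   mount.TypeBind,
			Source: "/data",
			Target: "/data",
			BindOptions: &mount.BindOptions{
				// Triggers "bind-recursive=readonly requires API v1.44 or later"
				// in ContainerCreate when the client negotiates an older API.
				ReadOnlyForceRecursive: true,
			},
		}},
	}
	fmt.Println(len(hostConfig.Mounts))
}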
50 src/cmd/linuxkit/vendor/github.com/docker/docker/client/errors.go generated vendored
@@ -4,9 +4,11 @@ import (
"context"
"errors"
"fmt"
"net/http"

cerrdefs "github.com/containerd/errdefs"
"github.com/containerd/errdefs/pkg/errhttp"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/errdefs"
)

// errConnectionFailed implements an error returned when connection failed.
@@ -48,9 +50,11 @@ func connectionFailed(host string) error {
}

// IsErrNotFound returns true if the error is a NotFound error, which is returned
// by the API when some object is not found. It is an alias for [errdefs.IsNotFound].
// by the API when some object is not found. It is an alias for [cerrdefs.IsNotFound].
//
// Deprecated: use [cerrdefs.IsNotFound] instead.
func IsErrNotFound(err error) bool {
return errdefs.IsNotFound(err)
return cerrdefs.IsNotFound(err)
}

type objectNotFoundError struct {
@@ -83,3 +87,43 @@ func (cli *Client) NewVersionError(ctx context.Context, APIrequired, feature str
}
return nil
}

type httpError struct {
err error
errdef error
}

func (e *httpError) Error() string {
return e.err.Error()
}

func (e *httpError) Unwrap() error {
return e.err
}

func (e *httpError) Is(target error) bool {
return errors.Is(e.errdef, target)
}

// httpErrorFromStatusCode creates an errdef error, based on the provided HTTP status-code
func httpErrorFromStatusCode(err error, statusCode int) error {
if err == nil {
return nil
}
base := errhttp.ToNative(statusCode)
if base != nil {
return &httpError{err: err, errdef: base}
}

switch {
case statusCode >= http.StatusOK && statusCode < http.StatusBadRequest:
// it's a client error
return err
case statusCode >= http.StatusBadRequest && statusCode < http.StatusInternalServerError:
return &httpError{err: err, errdef: cerrdefs.ErrInvalidArgument}
case statusCode >= http.StatusInternalServerError && statusCode < 600:
return &httpError{err: err, errdef: cerrdefs.ErrInternal}
default:
return &httpError{err: err, errdef: cerrdefs.ErrUnknown}
}
}
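Editor's note: with IsErrNotFound deprecated in favour of the containerd errdefs helpers, callers classify client errors as sketched below (not part of the diff); it assumes a reachable Docker daemon and the vendored client package, and prints false when the daemon cannot be reached:

package main

import (
	"context"
	"fmt"

	cerrdefs "github.com/containerd/errdefs"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// A 404 from the daemon is wrapped so that it satisfies the
	// containerd errdefs "not found" classification.
	_, err = cli.ContainerInspect(context.Background(), "no-such-container")
	fmt.Println(cerrdefs.IsNotFound(err))
}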
Some files were not shown because too many files have changed in this diff.