diff --git a/src/cmd/linuxkit/go.mod b/src/cmd/linuxkit/go.mod index f101c5f48..42bc24a5a 100644 --- a/src/cmd/linuxkit/go.mod +++ b/src/cmd/linuxkit/go.mod @@ -13,22 +13,22 @@ require ( github.com/Microsoft/go-winio v0.6.2 github.com/ScaleFT/sshkeys v0.0.0-20181112160850-82451a803681 github.com/aws/aws-sdk-go v1.44.82 - github.com/docker/cli v28.0.0-rc.2+incompatible - github.com/docker/docker v28.0.0-rc.2+incompatible + github.com/docker/cli v28.2.2+incompatible + github.com/docker/docker v28.2.2+incompatible github.com/docker/go-units v0.5.0 github.com/google/go-containerregistry v0.20.3 github.com/google/uuid v1.6.0 github.com/gophercloud/gophercloud v0.1.0 github.com/gophercloud/utils v0.0.0-20181029231510-34f5991525d1 github.com/klauspost/pgzip v1.2.5 - github.com/moby/buildkit v0.20.0 + github.com/moby/buildkit v0.23.1 github.com/moby/hyperkit v0.0.0-20180416161519-d65b09c1c28a //github.com/moby/moby v20.10.3-0.20220728162118-71cb54cec41e+incompatible // indirect github.com/moby/term v0.5.2 github.com/moby/vpnkit v0.4.1-0.20200311130018-2ffc1dd8a84e github.com/moul/gotty-client v1.7.1-0.20180526075433-e5589f6df359 - github.com/opencontainers/image-spec v1.1.0 - github.com/opencontainers/runtime-spec v1.2.0 + github.com/opencontainers/image-spec v1.1.1 + github.com/opencontainers/runtime-spec v1.2.1 github.com/pkg/term v1.1.0 github.com/radu-matei/azure-sdk-for-go v5.0.0-beta.0.20161118192335-3b1282355199+incompatible github.com/radu-matei/azure-vhd-utils v0.0.0-20170531165126-e52754d5569d @@ -39,33 +39,32 @@ require ( github.com/surma/gocpio v1.0.2-0.20160926205914-fcb68777e7dc github.com/vmware/govmomi v0.20.3 github.com/xeipuuv/gojsonschema v1.2.0 - golang.org/x/crypto v0.31.0 - golang.org/x/net v0.33.0 - golang.org/x/oauth2 v0.25.0 - golang.org/x/sync v0.10.0 - golang.org/x/sys v0.29.0 - golang.org/x/term v0.27.0 + golang.org/x/crypto v0.37.0 + golang.org/x/net v0.39.0 + golang.org/x/oauth2 v0.27.0 + golang.org/x/sync v0.14.0 + golang.org/x/sys v0.33.0 + golang.org/x/term v0.31.0 google.golang.org/api v0.149.0 gopkg.in/yaml.v2 v2.4.0 ) require ( github.com/Code-Hex/vz/v3 v3.0.0 - github.com/containerd/containerd/v2 v2.0.2 + github.com/containerd/containerd/v2 v2.1.3 github.com/containerd/platforms v1.0.0-rc.1 github.com/docker/buildx v0.21.1 github.com/equinix/equinix-sdk-go v0.42.0 github.com/hashicorp/go-version v1.7.0 - github.com/in-toto/in-toto-golang v0.5.0 + github.com/in-toto/in-toto-golang v0.9.0 github.com/moby/sys/capability v0.3.0 - github.com/spdx/tools-golang v0.5.3 + github.com/spdx/tools-golang v0.5.5 github.com/spf13/cobra v1.8.1 gopkg.in/yaml.v3 v3.0.1 ) require ( cloud.google.com/go/compute/metadata v0.6.0 // indirect - github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect github.com/Azure/go-autorest v14.2.1-0.20210115164004-c0fe8b0fea3d+incompatible // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect @@ -73,8 +72,8 @@ require ( github.com/Azure/go-autorest/tracing v0.6.0 // indirect github.com/agext/levenshtein v1.2.3 // indirect github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect - github.com/containerd/console v1.0.4 // indirect - github.com/containerd/containerd/api v1.8.0 // indirect + github.com/containerd/console v1.0.5 // indirect + github.com/containerd/containerd/api v1.9.0 // indirect github.com/containerd/continuity v0.4.5 // indirect github.com/containerd/errdefs v1.0.0 // indirect 
github.com/containerd/errdefs/pkg v0.3.0 // indirect @@ -87,7 +86,7 @@ require ( github.com/dchest/bcrypt_pbkdf v0.0.0-20150205184540-83f37f9c154a // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/distribution v2.8.3+incompatible // indirect - github.com/docker/docker-credential-helpers v0.8.2 // indirect + github.com/docker/docker-credential-helpers v0.9.3 // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.2 // indirect @@ -97,19 +96,19 @@ require ( github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect - github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/compress v1.18.0 // indirect github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b // indirect @@ -123,33 +122,33 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect + github.com/secure-systems-lab/go-securesystemslib v0.6.0 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect github.com/smartystreets/goconvey v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a // indirect - github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 // indirect + github.com/tonistiigi/fsutil v0.0.0-20250605211040-586307ad452f // indirect + github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0 // indirect github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab // indirect - github.com/vbatts/tar-split v0.11.6 // indirect + github.com/vbatts/tar-split v0.12.1 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect - go.opentelemetry.io/otel v1.33.0 // 
indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect - go.opentelemetry.io/otel/metric v1.33.0 // indirect - go.opentelemetry.io/otel/sdk v1.33.0 // indirect - go.opentelemetry.io/otel/trace v1.33.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect - golang.org/x/mod v0.22.0 // indirect - golang.org/x/text v0.21.0 // indirect - golang.org/x/time v0.6.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect - google.golang.org/grpc v1.69.4 // indirect - google.golang.org/protobuf v1.36.3 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect + go.opentelemetry.io/otel v1.35.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect + go.opentelemetry.io/otel/metric v1.35.0 // indirect + go.opentelemetry.io/otel/sdk v1.35.0 // indirect + go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.opentelemetry.io/proto/otlp v1.5.0 // indirect + golang.org/x/mod v0.24.0 // indirect + golang.org/x/text v0.24.0 // indirect + golang.org/x/time v0.11.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a // indirect + google.golang.org/grpc v1.72.2 // indirect + google.golang.org/protobuf v1.36.6 // indirect ) diff --git a/src/cmd/linuxkit/go.sum b/src/cmd/linuxkit/go.sum index 110db3b51..0dcf9440c 100644 --- a/src/cmd/linuxkit/go.sum +++ b/src/cmd/linuxkit/go.sum @@ -3,8 +3,6 @@ cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4 cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= -github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2 h1:dIScnXFlF784X79oi7MzVT6GWqr/W1uUt0pB5CsDs9M= -github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2/go.mod h1:gCLVsLfv1egrcZu+GoJATN5ts75F2s62ih/457eWzOw= github.com/Azure/azure-sdk-for-go v56.3.0+incompatible h1:DmhwMrUIvpeoTDiWRDtNHqelNUd3Og8JCkrLHQK795c= github.com/Azure/azure-sdk-for-go v56.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= @@ -33,8 +31,8 @@ github.com/Code-Hex/vz/v3 v3.0.0 h1:UuYAZz8NF8n+R4MLfn/lpUlepgzZ8BlhKtioQ+q9LXk= github.com/Code-Hex/vz/v3 v3.0.0/go.mod h1:+xPQOXVzNaZ4OeIIhlJFumtbFhsvRfqKwX7wuZS4dFA= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg= -github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y= +github.com/Microsoft/hcsshim v0.13.0 h1:/BcXOiS6Qi7N9XqUcv27vkIuVOkBEcWstd2pMlWSeaA= +github.com/Microsoft/hcsshim v0.13.0/go.mod h1:9KWJ/8DgU+QzYGupX4tzMhRQE8h6w90lH6HAaclpEok= github.com/ScaleFT/sshkeys v0.0.0-20181112160850-82451a803681 h1:JS2rl38kZmHgWa0xINSaSYH0Whtvem64/4+Ef0+Y5pE= github.com/ScaleFT/sshkeys 
v0.0.0-20181112160850-82451a803681/go.mod h1:WfDateMPQ/55dPbZRp5Zxrux5WiEaHsjk9puUhz0KgY= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= @@ -52,12 +50,12 @@ github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUo github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= -github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro= -github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= -github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0= -github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc= -github.com/containerd/containerd/v2 v2.0.2 h1:GmH/tRBlTvrXOLwSpWE2vNAm8+MqI6nmxKpKBNKY8Wc= -github.com/containerd/containerd/v2 v2.0.2/go.mod h1:wIqEvQ/6cyPFUGJ5yMFanspPabMLor+bF865OHvNTTI= +github.com/containerd/console v1.0.5 h1:R0ymNeydRqH2DmakFNdmjR2k0t7UPuiOV/N/27/qqsc= +github.com/containerd/console v1.0.5/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk= +github.com/containerd/containerd/api v1.9.0 h1:HZ/licowTRazus+wt9fM6r/9BQO7S0vD5lMcWspGIg0= +github.com/containerd/containerd/api v1.9.0/go.mod h1:GhghKFmTR3hNtyznBoQ0EMWr9ju5AqHjcZPsSpTKutI= +github.com/containerd/containerd/v2 v2.1.3 h1:eMD2SLcIQPdMlnlNF6fatlrlRLAeDaiGPGwmRKLZKNs= +github.com/containerd/containerd/v2 v2.1.3/go.mod h1:8C5QV9djwsYDNhxfTCFjWtTBZrqjditQ4/ghHSYjnHM= github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= @@ -68,8 +66,8 @@ github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= -github.com/containerd/nydus-snapshotter v0.15.0 h1:RqZRs1GPeM6T3wmuxJV9u+2Rg4YETVMwTmiDeX+iWC8= -github.com/containerd/nydus-snapshotter v0.15.0/go.mod h1:biq0ijpeZe0I5yZFSJyHzFSjjRZQ7P7y/OuHyd7hYOw= +github.com/containerd/nydus-snapshotter v0.15.2 h1:qsHI4M+Wwrf6Jr4eBqhNx8qh+YU0dSiJ+WPmcLFWNcg= +github.com/containerd/nydus-snapshotter v0.15.2/go.mod h1:FfwH2KBkNYoisK/e+KsmNr7xTU53DmnavQHMFOcXwfM= github.com/containerd/platforms v1.0.0-rc.1 h1:83KIq4yy1erSRgOVHNk1HYdPvzdJ5CnsWaRoJX4C41E= github.com/containerd/platforms v1.0.0-rc.1/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4= github.com/containerd/plugin v1.0.0 h1:c8Kf1TNl6+e2TtMHZt+39yAPDbouRH9WAToRjex483Y= @@ -96,14 +94,14 @@ github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/buildx v0.21.1 h1:YjV2k6CsSDbkDTOMsjARUIrj2xv+zZR+M2dtrRyzXhg= github.com/docker/buildx v0.21.1/go.mod h1:8V4UMnlKsaGYwz83BygmIbJIFEAYGHT6KAv8akDZmqo= -github.com/docker/cli v28.0.0-rc.2+incompatible h1:2N1dpr3qtlJwIQpqXm7oNwWNAUGzpKlsCeJ32ejvpTk= -github.com/docker/cli v28.0.0-rc.2+incompatible/go.mod 
h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.2.2+incompatible h1:qzx5BNUDFqlvyq4AHzdNB7gSyVTmU4cgsyN9SdInc1A= +github.com/docker/cli v28.2.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v28.0.0-rc.2+incompatible h1:p+Ri+C0mmbPkhYVD9Sxnp/TnNnZoQWEj/EwOC465Uq4= -github.com/docker/docker v28.0.0-rc.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= -github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/docker v28.2.2+incompatible h1:CjwRSksz8Yo4+RmQ339Dp/D2tGO5JxwYeqtMOEe0LDw= +github.com/docker/docker v28.2.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8= +github.com/docker/docker-credential-helpers v0.9.3/go.mod h1:x+4Gbw9aGmChi3qTLZj8Dfn0TD20M/fuWy0E5+WDeCo= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -116,8 +114,6 @@ github.com/equinix/equinix-sdk-go v0.42.0 h1:jjgdFs0rx6nOwsu/dLh6ImopD0M1Rn7QIn9 github.com/equinix/equinix-sdk-go v0.42.0/go.mod h1:hEb3XLaedz7xhl/dpPIS6eOIiXNPeqNiVoyDrT6paIg= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -154,8 +150,9 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= @@ -177,8 +174,8 @@ github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25d github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= github.com/gorilla/websocket v1.5.0 
h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -188,8 +185,8 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/in-toto/in-toto-golang v0.5.0 h1:hb8bgwr0M2hGdDsLjkJ3ZqJ8JFLL/tgYdAxF/XEFBbY= -github.com/in-toto/in-toto-golang v0.5.0/go.mod h1:/Rq0IZHLV7Ku5gielPT4wPHJfH1GdHMCq8+WPxw8/BE= +github.com/in-toto/in-toto-golang v0.9.0 h1:tHny7ac4KgtsfrG6ybU8gVOZux2H8jN05AXJ9EBM1XU= +github.com/in-toto/in-toto-golang v0.9.0/go.mod h1:xsBVrVsHNsB61++S6Dy2vWosKhuA3lUTQd+eF9HdeMo= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -200,8 +197,8 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= -github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -216,8 +213,8 @@ github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b h1:9+ke9YJ9KGWw5AN github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= -github.com/moby/buildkit v0.20.0 h1:aF5RujjQ310Pn6SLL/wQYIrSsPXy0sQ5KvWifwq1h8Y= -github.com/moby/buildkit v0.20.0/go.mod h1:HYFUIK+iGDRxRgdphZ9Nv0y1Fz7mv0HrU7xZoXx217E= +github.com/moby/buildkit v0.23.1 h1:CZtFmPRF+IFG1C8QfPnktGO1Dzzt5JSwtQ5eDqIh+ag= +github.com/moby/buildkit v0.23.1/go.mod 
h1:keNXljNmKX1T0AtM0bMObc8OV6mA9cOuquVbPcRpU/Y= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/hyperkit v0.0.0-20180416161519-d65b09c1c28a h1:hExo7kltIidoitYnXsnqfvkJXG3YEMbHuQbf9nOMvow= @@ -226,6 +223,8 @@ github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= github.com/moby/sys/capability v0.3.0 h1:kEP+y6te0gEXIaeQhIi0s7vKs/w0RPoH1qPa6jROcVg= github.com/moby/sys/capability v0.3.0/go.mod h1:4g9IK291rVkms3LKCDOoYlnV8xKwoDTpIrNEE35Wq0I= github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= @@ -234,8 +233,8 @@ github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7z github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= github.com/moby/sys/signal v0.7.1 h1:PrQxdvxcGijdo6UXXo/lU/TvHUWyPhj7UOpSo8tuvk0= github.com/moby/sys/signal v0.7.1/go.mod h1:Se1VGehYokAkrSQwL4tDzHvETwUZlnY7S5XtQ50mQp8= -github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= -github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= @@ -248,14 +247,12 @@ github.com/moul/gotty-client v1.7.1-0.20180526075433-e5589f6df359 h1:gIpgn2fMLvl github.com/moul/gotty-client v1.7.1-0.20180526075433-e5589f6df359/go.mod h1:CxM/JGtpRrEPve5H04IhxJrGhxgwxMc6jSP2T4YD60w= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= -github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= -github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626 h1:DmNGcqH3WDbV5k8OJ+esPWbqUOX5rMLR2PMvziDMJi0= -github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI= -github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8= -github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= 
+github.com/opencontainers/runtime-spec v1.2.1 h1:S4k4ryNgEpxW1dzyqffOmhI1BHYcjzU8lpJfSlR0xww= +github.com/opencontainers/runtime-spec v1.2.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.12.0 h1:6n5JV4Cf+4y0KNXW48TLj5DwfXpvWlxXplUkdTrmPb8= +github.com/opencontainers/selinux v1.12.0/go.mod h1:BTPX+bjVbWGXw7ZZWUbdENt8w0htPSrlgOOysQaU62U= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -280,8 +277,8 @@ github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWN github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.6 h1:C1/pvkxkGN/H03mDxLzItaceYJDBk1HdClgR15suAzI= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.6/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8= -github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE= -github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs= +github.com/secure-systems-lab/go-securesystemslib v0.6.0 h1:T65atpAVCJQK14UA57LMdZGpHi4QYSH/9FZyNGqMYIA= +github.com/secure-systems-lab/go-securesystemslib v0.6.0/go.mod h1:8Mtpo9JKks/qhPG4HGZ2LGMvrPbzuxwfz/f/zLfEWkk= github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= @@ -291,8 +288,8 @@ github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+ github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= github.com/spdx/gordf v0.0.0-20201111095634-7098f93598fb/go.mod h1:uKWaldnbMnjsSAXRurWqqrdyZen1R7kxl8TkmWk2OyM= -github.com/spdx/tools-golang v0.5.3 h1:ialnHeEYUC4+hkm5vJm4qz2x+oEJbS0mAMFrNXdQraY= -github.com/spdx/tools-golang v0.5.3/go.mod h1:/ETOahiAo96Ob0/RAIBmFZw6XN0yTnyr/uFZm2NTMhI= +github.com/spdx/tools-golang v0.5.5 h1:61c0KLfAcNqAjlg6UNMdkwpMernhw3zVRwDZ2x9XOmk= +github.com/spdx/tools-golang v0.5.5/go.mod h1:MVIsXx8ZZzaRWNQpUDhC4Dud34edUYJYecciXgrw5vE= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -300,28 +297,28 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= 
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/surma/gocpio v1.0.2-0.20160926205914-fcb68777e7dc h1:iA3Eg1OVd2o0M4M+0PBsBBssMz98L8CUH7x0xVkuyUA= github.com/surma/gocpio v1.0.2-0.20160926205914-fcb68777e7dc/go.mod h1:zaLNaN+EDnfSnNdWPJJf9OZxWF817w5dt8JNzF9LCVI= -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a h1:EfGw4G0x/8qXWgtcZ6KVaPS+wpWOQMaypczzP8ojkMY= -github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a/go.mod h1:Dl/9oEjK7IqnjAm21Okx/XIxUCFJzvh+XdVHUlBwXTw= -github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 h1:7I5c2Ig/5FgqkYOh/N87NzoyI9U15qUPXhDD8uCupv8= -github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4/go.mod h1:278M4p8WsNh3n4a1eqiFcV2FGk7wE5fwUpUom9mK9lE= +github.com/tonistiigi/fsutil v0.0.0-20250605211040-586307ad452f h1:MoxeMfHAe5Qj/ySSBfL8A7l1V+hxuluj8owsIEEZipI= +github.com/tonistiigi/fsutil v0.0.0-20250605211040-586307ad452f/go.mod h1:BKdcez7BiVtBvIcef90ZPc6ebqIWr4JWD7+EvLm6J98= +github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0 h1:2f304B10LaZdB8kkVEaoXvAMVan2tl9AiK4G0odjQtE= +github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0/go.mod h1:278M4p8WsNh3n4a1eqiFcV2FGk7wE5fwUpUom9mK9lE= github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0= github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk= github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab h1:H6aJ0yKQ0gF49Qb2z5hI1UHxSQt4JMyxebFR15KnApw= github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab/go.mod h1:ulncasL3N9uLrVann0m+CDlJKWsIAP34MPcOJF6VRvc= -github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs= -github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI= +github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= +github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/vmware/govmomi v0.20.3 h1:gpw/0Ku+6RgF3jsi7fnCLmlcikBHfKBCUcu1qgc16OU= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -337,44 +334,46 @@ go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod 
h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0 h1:4BZHA+B1wXEQoGNHxW8mURaLhcdGwvRnmhGbm+odRbc= go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0/go.mod h1:3qi2EEwMgB4xnKgPLqsDP3j9qxnHDZeHsnAxfjQqTko= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= -go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= -go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= -go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= -go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= -go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= -go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= -go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= -go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= -go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= -go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= +go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= +go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk= +go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= +go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= +go.opentelemetry.io/otel/sdk v1.35.0 
h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= +go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= +go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= +go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= +go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE= +golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= -golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -387,18 +386,18 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= 
-golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= +golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -412,20 +411,20 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= -golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= +golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o= +golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod 
h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= +golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -445,17 +444,17 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 h1:2oV8dfuIkM1Ti7DwXc0BJfnwr9csz4TDXI9EmiI+Rbw= -google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38/go.mod h1:vuAjtvlwkDKF6L1GQ0SokiRLCGFfeBUXWr/aFFkHACc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a h1:nwKuGPlUAt+aR+pcrkfFRrTU1BVrSmYyYMxYbUIVHr0= +google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a/go.mod h1:3kWAYMk1I75K4vykHtKt2ycnOgpA6974V7bREqbsenU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a h1:51aaUVRocpvUOSQKM6Q7VuoaktNIaMCLuhZB6DKksq4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a/go.mod h1:uRxBH1mhmO8PGhU89cMcHaXKZqO+OfakD8QQO0oYwlQ= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= -google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/grpc v1.72.2 h1:TdbGzwb82ty4OusHWepvFWGLgIbNo1/SUynEN0ssqv8= +google.golang.org/grpc v1.72.2/go.mod h1:wH5Aktxcg25y1I3w7H69nHfXdOG3UiadoBtjh3izSDM= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -465,8 +464,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= -google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -481,10 +480,4 @@ gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= -sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= -tags.cncf.io/container-device-interface v0.8.0 h1:8bCFo/g9WODjWx3m6EYl3GfUG31eKJbaggyBDxEldRc= -tags.cncf.io/container-device-interface v0.8.0/go.mod h1:Apb7N4VdILW0EVdEMRYXIDVRZfNJZ+kmEUss2kRRQ6Y= -tags.cncf.io/container-device-interface/specs-go v0.8.0 h1:QYGFzGxvYK/ZLMrjhvY0RjpUavIn4KcmRmVP/JjdBTA= -tags.cncf.io/container-device-interface/specs-go v0.8.0/go.mod h1:BhJIkjjPh4qpys+qm4DAYtUyryaTDg9zris+AczXyws= diff --git a/src/cmd/linuxkit/pkg_build.go b/src/cmd/linuxkit/pkg_build.go index 7e5f0eb82..6c00f92e5 100644 --- a/src/cmd/linuxkit/pkg_build.go +++ b/src/cmd/linuxkit/pkg_build.go @@ -15,7 +15,7 @@ import ( const ( buildersEnvVar = "LINUXKIT_BUILDERS" envVarCacheDir = "LINUXKIT_CACHE" - defaultBuilderImage = "moby/buildkit:v0.20.0" + defaultBuilderImage = "moby/buildkit:v0.23.1" ) // some logic clarification: diff --git a/src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/LICENSE b/src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/README.md b/src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/README.md deleted file mode 100644 index 0a0d60c74..000000000 --- a/src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/README.md +++ /dev/null @@ -1,93 +0,0 @@ -# go-fuzz-headers -This repository contains various helper functions for go fuzzing. It is mostly used in combination with [go-fuzz](https://github.com/dvyukov/go-fuzz), but compatibility with fuzzing in the standard library will also be supported. Any coverage guided fuzzing engine that provides an array or slice of bytes can be used with go-fuzz-headers. - - -## Usage -Using go-fuzz-headers is easy. First create a new consumer with the bytes provided by the fuzzing engine: - -```go -import ( - fuzz "github.com/AdaLogics/go-fuzz-headers" -) -data := []byte{'R', 'a', 'n', 'd', 'o', 'm'} -f := fuzz.NewConsumer(data) - -``` - -This creates a `Consumer` that consumes the bytes of the input as it uses them to fuzz different types. - -After that, `f` can be used to easily create fuzzed instances of different types. Below are some examples: - -### Structs -One of the most useful features of go-fuzz-headers is its ability to fill structs with the data provided by the fuzzing engine. This is done with a single line: -```go -type Person struct { - Name string - Age int -} -p := Person{} -// Fill p with values based on the data provided by the fuzzing engine: -err := f.GenerateStruct(&p) -``` - -This includes nested structs too. In this example, the fuzz Consumer will also insert values in `p.BestFriend`: -```go -type PersonI struct { - Name string - Age int - BestFriend PersonII -} -type PersonII struct { - Name string - Age int -} -p := PersonI{} -err := f.GenerateStruct(&p) -``` - -If the consumer should insert values for unexported fields as well as exported, this can be enabled with: - -```go -f.AllowUnexportedFields() -``` - -...and disabled with: - -```go -f.DisallowUnexportedFields() -``` - -### Other types: - -Other useful APIs: - -```go -createdString, err := f.GetString() // Gets a string -createdInt, err := f.GetInt() // Gets an integer -createdByte, err := f.GetByte() // Gets a byte -createdBytes, err := f.GetBytes() // Gets a byte slice -createdBool, err := f.GetBool() // Gets a boolean -err := f.FuzzMap(target_map) // Fills a map -createdTarBytes, err := f.TarBytes() // Gets bytes of a valid tar archive -err := f.CreateFiles(inThisDir) // Fills inThisDir with files -createdString, err := f.GetStringFrom("anyCharInThisString", ofThisLength) // Gets a string that consists of chars from "anyCharInThisString" and has the exact length "ofThisLength" -``` - -Most APIs are added as they are needed. - -## Projects that use go-fuzz-headers -- [runC](https://github.com/opencontainers/runc) -- [Istio](https://github.com/istio/istio) -- [Vitess](https://github.com/vitessio/vitess) -- [Containerd](https://github.com/containerd/containerd) - -Feel free to add your own project to the list, if you use go-fuzz-headers to fuzz it. - - - - -## Status -The project is under development and will be updated regularly. - -## References -go-fuzz-headers' approach to fuzzing structs is strongly inspired by [gofuzz](https://github.com/google/gofuzz). 
\ No newline at end of file diff --git a/src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go b/src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go deleted file mode 100644 index 361c9ac69..000000000 --- a/src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/consumer.go +++ /dev/null @@ -1,960 +0,0 @@ -// Copyright 2023 The go-fuzz-headers Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gofuzzheaders - -import ( - "archive/tar" - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math" - "os" - "path/filepath" - "reflect" - "strconv" - "strings" - "time" - "unsafe" -) - -var ( - MaxTotalLen uint32 = 2000000 - maxDepth = 100 -) - -func SetMaxTotalLen(newLen uint32) { - MaxTotalLen = newLen -} - -type ConsumeFuzzer struct { - data []byte - dataTotal uint32 - CommandPart []byte - RestOfArray []byte - NumberOfCalls int - position uint32 - fuzzUnexportedFields bool - forceUTF8Strings bool - curDepth int - Funcs map[reflect.Type]reflect.Value -} - -func IsDivisibleBy(n int, divisibleby int) bool { - return (n % divisibleby) == 0 -} - -func NewConsumer(fuzzData []byte) *ConsumeFuzzer { - return &ConsumeFuzzer{ - data: fuzzData, - dataTotal: uint32(len(fuzzData)), - Funcs: make(map[reflect.Type]reflect.Value), - curDepth: 0, - } -} - -func (f *ConsumeFuzzer) Split(minCalls, maxCalls int) error { - if f.dataTotal == 0 { - return errors.New("could not split") - } - numberOfCalls := int(f.data[0]) - if numberOfCalls < minCalls || numberOfCalls > maxCalls { - return errors.New("bad number of calls") - } - if int(f.dataTotal) < numberOfCalls+numberOfCalls+1 { - return errors.New("length of data does not match required parameters") - } - - // Define part 2 and 3 of the data array - commandPart := f.data[1 : numberOfCalls+1] - restOfArray := f.data[numberOfCalls+1:] - - // Just a small check. It is necessary - if len(commandPart) != numberOfCalls { - return errors.New("length of commandPart does not match number of calls") - } - - // Check if restOfArray is divisible by numberOfCalls - if !IsDivisibleBy(len(restOfArray), numberOfCalls) { - return errors.New("length of commandPart does not match number of calls") - } - f.CommandPart = commandPart - f.RestOfArray = restOfArray - f.NumberOfCalls = numberOfCalls - return nil -} - -func (f *ConsumeFuzzer) AllowUnexportedFields() { - f.fuzzUnexportedFields = true -} - -func (f *ConsumeFuzzer) DisallowUnexportedFields() { - f.fuzzUnexportedFields = false -} - -func (f *ConsumeFuzzer) AllowNonUTF8Strings() { - f.forceUTF8Strings = false -} - -func (f *ConsumeFuzzer) DisallowNonUTF8Strings() { - f.forceUTF8Strings = true -} - -func (f *ConsumeFuzzer) GenerateStruct(targetStruct interface{}) error { - e := reflect.ValueOf(targetStruct).Elem() - return f.fuzzStruct(e, false) -} - -func (f *ConsumeFuzzer) setCustom(v reflect.Value) error { - // First: see if we have a fuzz function for it. 
- doCustom, ok := f.Funcs[v.Type()] - if !ok { - return fmt.Errorf("could not find a custom function") - } - - switch v.Kind() { - case reflect.Ptr: - if v.IsNil() { - if !v.CanSet() { - return fmt.Errorf("could not use a custom function") - } - v.Set(reflect.New(v.Type().Elem())) - } - case reflect.Map: - if v.IsNil() { - if !v.CanSet() { - return fmt.Errorf("could not use a custom function") - } - v.Set(reflect.MakeMap(v.Type())) - } - default: - return fmt.Errorf("could not use a custom function") - } - - verr := doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ - F: f, - })}) - - // check if we return an error - if verr[0].IsNil() { - return nil - } - return fmt.Errorf("could not use a custom function") -} - -func (f *ConsumeFuzzer) fuzzStruct(e reflect.Value, customFunctions bool) error { - if f.curDepth >= maxDepth { - // return err or nil here? - return nil - } - f.curDepth++ - defer func() { f.curDepth-- }() - - // We check if we should check for custom functions - if customFunctions && e.IsValid() && e.CanAddr() { - err := f.setCustom(e.Addr()) - if err != nil { - return err - } - } - - switch e.Kind() { - case reflect.Struct: - for i := 0; i < e.NumField(); i++ { - var v reflect.Value - if !e.Field(i).CanSet() { - if f.fuzzUnexportedFields { - v = reflect.NewAt(e.Field(i).Type(), unsafe.Pointer(e.Field(i).UnsafeAddr())).Elem() - } - if err := f.fuzzStruct(v, customFunctions); err != nil { - return err - } - } else { - v = e.Field(i) - if err := f.fuzzStruct(v, customFunctions); err != nil { - return err - } - } - } - case reflect.String: - str, err := f.GetString() - if err != nil { - return err - } - if e.CanSet() { - e.SetString(str) - } - case reflect.Slice: - var maxElements uint32 - // Byte slices should not be restricted - if e.Type().String() == "[]uint8" { - maxElements = 10000000 - } else { - maxElements = 50 - } - - randQty, err := f.GetUint32() - if err != nil { - return err - } - numOfElements := randQty % maxElements - if (f.dataTotal - f.position) < numOfElements { - numOfElements = f.dataTotal - f.position - } - - uu := reflect.MakeSlice(e.Type(), int(numOfElements), int(numOfElements)) - - for i := 0; i < int(numOfElements); i++ { - // If we have more than 10, then we can proceed with that. 
- if err := f.fuzzStruct(uu.Index(i), customFunctions); err != nil { - if i >= 10 { - if e.CanSet() { - e.Set(uu) - } - return nil - } else { - return err - } - } - } - if e.CanSet() { - e.Set(uu) - } - case reflect.Uint: - newInt, err := f.GetUint() - if err != nil { - return err - } - if e.CanSet() { - e.SetUint(uint64(newInt)) - } - case reflect.Uint16: - newInt, err := f.GetUint16() - if err != nil { - return err - } - if e.CanSet() { - e.SetUint(uint64(newInt)) - } - case reflect.Uint32: - newInt, err := f.GetUint32() - if err != nil { - return err - } - if e.CanSet() { - e.SetUint(uint64(newInt)) - } - case reflect.Uint64: - newInt, err := f.GetInt() - if err != nil { - return err - } - if e.CanSet() { - e.SetUint(uint64(newInt)) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - newInt, err := f.GetInt() - if err != nil { - return err - } - if e.CanSet() { - e.SetInt(int64(newInt)) - } - case reflect.Float32: - newFloat, err := f.GetFloat32() - if err != nil { - return err - } - if e.CanSet() { - e.SetFloat(float64(newFloat)) - } - case reflect.Float64: - newFloat, err := f.GetFloat64() - if err != nil { - return err - } - if e.CanSet() { - e.SetFloat(float64(newFloat)) - } - case reflect.Map: - if e.CanSet() { - e.Set(reflect.MakeMap(e.Type())) - const maxElements = 50 - randQty, err := f.GetInt() - if err != nil { - return err - } - numOfElements := randQty % maxElements - for i := 0; i < numOfElements; i++ { - key := reflect.New(e.Type().Key()).Elem() - if err := f.fuzzStruct(key, customFunctions); err != nil { - return err - } - val := reflect.New(e.Type().Elem()).Elem() - if err = f.fuzzStruct(val, customFunctions); err != nil { - return err - } - e.SetMapIndex(key, val) - } - } - case reflect.Ptr: - if e.CanSet() { - e.Set(reflect.New(e.Type().Elem())) - if err := f.fuzzStruct(e.Elem(), customFunctions); err != nil { - return err - } - return nil - } - case reflect.Uint8: - b, err := f.GetByte() - if err != nil { - return err - } - if e.CanSet() { - e.SetUint(uint64(b)) - } - case reflect.Bool: - b, err := f.GetBool() - if err != nil { - return err - } - if e.CanSet() { - e.SetBool(b) - } - } - return nil -} - -func (f *ConsumeFuzzer) GetStringArray() (reflect.Value, error) { - // The max size of the array: - const max uint32 = 20 - - arraySize := f.position - if arraySize > max { - arraySize = max - } - stringArray := reflect.MakeSlice(reflect.SliceOf(reflect.TypeOf("string")), int(arraySize), int(arraySize)) - if f.position+arraySize >= f.dataTotal { - return stringArray, errors.New("could not make string array") - } - - for i := 0; i < int(arraySize); i++ { - stringSize := uint32(f.data[f.position]) - if f.position+stringSize >= f.dataTotal { - return stringArray, nil - } - stringToAppend := string(f.data[f.position : f.position+stringSize]) - strVal := reflect.ValueOf(stringToAppend) - stringArray = reflect.Append(stringArray, strVal) - f.position += stringSize - } - return stringArray, nil -} - -func (f *ConsumeFuzzer) GetInt() (int, error) { - if f.position >= f.dataTotal { - return 0, errors.New("not enough bytes to create int") - } - returnInt := int(f.data[f.position]) - f.position++ - return returnInt, nil -} - -func (f *ConsumeFuzzer) GetByte() (byte, error) { - if f.position >= f.dataTotal { - return 0x00, errors.New("not enough bytes to get byte") - } - returnByte := f.data[f.position] - f.position++ - return returnByte, nil -} - -func (f *ConsumeFuzzer) GetNBytes(numberOfBytes int) ([]byte, error) { - if f.position >= f.dataTotal 
{ - return nil, errors.New("not enough bytes to get byte") - } - returnBytes := make([]byte, 0, numberOfBytes) - for i := 0; i < numberOfBytes; i++ { - newByte, err := f.GetByte() - if err != nil { - return nil, err - } - returnBytes = append(returnBytes, newByte) - } - return returnBytes, nil -} - -func (f *ConsumeFuzzer) GetUint16() (uint16, error) { - u16, err := f.GetNBytes(2) - if err != nil { - return 0, err - } - littleEndian, err := f.GetBool() - if err != nil { - return 0, err - } - if littleEndian { - return binary.LittleEndian.Uint16(u16), nil - } - return binary.BigEndian.Uint16(u16), nil -} - -func (f *ConsumeFuzzer) GetUint32() (uint32, error) { - u32, err := f.GetNBytes(4) - if err != nil { - return 0, err - } - return binary.BigEndian.Uint32(u32), nil -} - -func (f *ConsumeFuzzer) GetUint64() (uint64, error) { - u64, err := f.GetNBytes(8) - if err != nil { - return 0, err - } - littleEndian, err := f.GetBool() - if err != nil { - return 0, err - } - if littleEndian { - return binary.LittleEndian.Uint64(u64), nil - } - return binary.BigEndian.Uint64(u64), nil -} - -func (f *ConsumeFuzzer) GetUint() (uint, error) { - var zero uint - size := int(unsafe.Sizeof(zero)) - if size == 8 { - u64, err := f.GetUint64() - if err != nil { - return 0, err - } - return uint(u64), nil - } - u32, err := f.GetUint32() - if err != nil { - return 0, err - } - return uint(u32), nil -} - -func (f *ConsumeFuzzer) GetBytes() ([]byte, error) { - var length uint32 - var err error - length, err = f.GetUint32() - if err != nil { - return nil, errors.New("not enough bytes to create byte array") - } - - if length == 0 { - length = 30 - } - bytesLeft := f.dataTotal - f.position - if bytesLeft <= 0 { - return nil, errors.New("not enough bytes to create byte array") - } - - // If the length is the same as bytes left, we will not overflow - // the remaining bytes. 
- if length != bytesLeft { - length = length % bytesLeft - } - byteBegin := f.position - if byteBegin+length < byteBegin { - return nil, errors.New("numbers overflow") - } - f.position = byteBegin + length - return f.data[byteBegin:f.position], nil -} - -func (f *ConsumeFuzzer) GetString() (string, error) { - if f.position >= f.dataTotal { - return "nil", errors.New("not enough bytes to create string") - } - length, err := f.GetUint32() - if err != nil { - return "nil", errors.New("not enough bytes to create string") - } - if f.position > MaxTotalLen { - return "nil", errors.New("created too large a string") - } - byteBegin := f.position - if byteBegin >= f.dataTotal { - return "nil", errors.New("not enough bytes to create string") - } - if byteBegin+length > f.dataTotal { - return "nil", errors.New("not enough bytes to create string") - } - if byteBegin > byteBegin+length { - return "nil", errors.New("numbers overflow") - } - f.position = byteBegin + length - s := string(f.data[byteBegin:f.position]) - if f.forceUTF8Strings { - s = strings.ToValidUTF8(s, "") - } - return s, nil -} - -func (f *ConsumeFuzzer) GetBool() (bool, error) { - if f.position >= f.dataTotal { - return false, errors.New("not enough bytes to create bool") - } - if IsDivisibleBy(int(f.data[f.position]), 2) { - f.position++ - return true, nil - } else { - f.position++ - return false, nil - } -} - -func (f *ConsumeFuzzer) FuzzMap(m interface{}) error { - return f.GenerateStruct(m) -} - -func returnTarBytes(buf []byte) ([]byte, error) { - return buf, nil - // Count files - var fileCounter int - tr := tar.NewReader(bytes.NewReader(buf)) - for { - _, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - return nil, err - } - fileCounter++ - } - if fileCounter >= 1 { - return buf, nil - } - return nil, fmt.Errorf("not enough files were created\n") -} - -func setTarHeaderFormat(hdr *tar.Header, f *ConsumeFuzzer) error { - ind, err := f.GetInt() - if err != nil { - hdr.Format = tar.FormatGNU - //return nil - } - switch ind % 4 { - case 0: - hdr.Format = tar.FormatUnknown - case 1: - hdr.Format = tar.FormatUSTAR - case 2: - hdr.Format = tar.FormatPAX - case 3: - hdr.Format = tar.FormatGNU - } - return nil -} - -func setTarHeaderTypeflag(hdr *tar.Header, f *ConsumeFuzzer) error { - ind, err := f.GetInt() - if err != nil { - return err - } - switch ind % 13 { - case 0: - hdr.Typeflag = tar.TypeReg - case 1: - hdr.Typeflag = tar.TypeLink - linkname, err := f.GetString() - if err != nil { - return err - } - hdr.Linkname = linkname - case 2: - hdr.Typeflag = tar.TypeSymlink - linkname, err := f.GetString() - if err != nil { - return err - } - hdr.Linkname = linkname - case 3: - hdr.Typeflag = tar.TypeChar - case 4: - hdr.Typeflag = tar.TypeBlock - case 5: - hdr.Typeflag = tar.TypeDir - case 6: - hdr.Typeflag = tar.TypeFifo - case 7: - hdr.Typeflag = tar.TypeCont - case 8: - hdr.Typeflag = tar.TypeXHeader - case 9: - hdr.Typeflag = tar.TypeXGlobalHeader - case 10: - hdr.Typeflag = tar.TypeGNUSparse - case 11: - hdr.Typeflag = tar.TypeGNULongName - case 12: - hdr.Typeflag = tar.TypeGNULongLink - } - return nil -} - -func (f *ConsumeFuzzer) createTarFileBody() ([]byte, error) { - return f.GetBytes() - /*length, err := f.GetUint32() - if err != nil { - return nil, errors.New("not enough bytes to create byte array") - } - - // A bit of optimization to attempt to create a file body - // when we don't have as many bytes left as "length" - remainingBytes := f.dataTotal - f.position - if remainingBytes <= 0 { - return nil, 
errors.New("created too large a string") - } - if f.position+length > MaxTotalLen { - return nil, errors.New("created too large a string") - } - byteBegin := f.position - if byteBegin >= f.dataTotal { - return nil, errors.New("not enough bytes to create byte array") - } - if length == 0 { - return nil, errors.New("zero-length is not supported") - } - if byteBegin+length >= f.dataTotal { - return nil, errors.New("not enough bytes to create byte array") - } - if byteBegin+length < byteBegin { - return nil, errors.New("numbers overflow") - } - f.position = byteBegin + length - return f.data[byteBegin:f.position], nil*/ -} - -// getTarFileName is similar to GetString(), but creates string based -// on the length of f.data to reduce the likelihood of overflowing -// f.data. -func (f *ConsumeFuzzer) getTarFilename() (string, error) { - return f.GetString() - /*length, err := f.GetUint32() - if err != nil { - return "nil", errors.New("not enough bytes to create string") - } - - // A bit of optimization to attempt to create a file name - // when we don't have as many bytes left as "length" - remainingBytes := f.dataTotal - f.position - if remainingBytes <= 0 { - return "nil", errors.New("created too large a string") - } - if f.position > MaxTotalLen { - return "nil", errors.New("created too large a string") - } - byteBegin := f.position - if byteBegin >= f.dataTotal { - return "nil", errors.New("not enough bytes to create string") - } - if byteBegin+length > f.dataTotal { - return "nil", errors.New("not enough bytes to create string") - } - if byteBegin > byteBegin+length { - return "nil", errors.New("numbers overflow") - } - f.position = byteBegin + length - return string(f.data[byteBegin:f.position]), nil*/ -} - -type TarFile struct { - Hdr *tar.Header - Body []byte -} - -// TarBytes returns valid bytes for a tar archive -func (f *ConsumeFuzzer) TarBytes() ([]byte, error) { - numberOfFiles, err := f.GetInt() - if err != nil { - return nil, err - } - var tarFiles []*TarFile - tarFiles = make([]*TarFile, 0) - - const maxNoOfFiles = 100 - for i := 0; i < numberOfFiles%maxNoOfFiles; i++ { - var filename string - var filebody []byte - var sec, nsec int - var err error - - filename, err = f.getTarFilename() - if err != nil { - var sb strings.Builder - sb.WriteString("file-") - sb.WriteString(strconv.Itoa(i)) - filename = sb.String() - } - filebody, err = f.createTarFileBody() - if err != nil { - var sb strings.Builder - sb.WriteString("filebody-") - sb.WriteString(strconv.Itoa(i)) - filebody = []byte(sb.String()) - } - - sec, err = f.GetInt() - if err != nil { - sec = 1672531200 // beginning of 2023 - } - nsec, err = f.GetInt() - if err != nil { - nsec = 1703980800 // end of 2023 - } - - hdr := &tar.Header{ - Name: filename, - Size: int64(len(filebody)), - Mode: 0o600, - ModTime: time.Unix(int64(sec), int64(nsec)), - } - if err := setTarHeaderTypeflag(hdr, f); err != nil { - return []byte(""), err - } - if err := setTarHeaderFormat(hdr, f); err != nil { - return []byte(""), err - } - tf := &TarFile{ - Hdr: hdr, - Body: filebody, - } - tarFiles = append(tarFiles, tf) - } - - var buf bytes.Buffer - tw := tar.NewWriter(&buf) - defer tw.Close() - - for _, tf := range tarFiles { - tw.WriteHeader(tf.Hdr) - tw.Write(tf.Body) - } - return buf.Bytes(), nil -} - -// This is similar to TarBytes, but it returns a series of -// files instead of raw tar bytes. 
The advantage of this -// api is that it is cheaper in terms of cpu power to -// modify or check the files in the fuzzer with TarFiles() -// because it avoids creating a tar reader. -func (f *ConsumeFuzzer) TarFiles() ([]*TarFile, error) { - numberOfFiles, err := f.GetInt() - if err != nil { - return nil, err - } - var tarFiles []*TarFile - tarFiles = make([]*TarFile, 0) - - const maxNoOfFiles = 100 - for i := 0; i < numberOfFiles%maxNoOfFiles; i++ { - filename, err := f.getTarFilename() - if err != nil { - return tarFiles, err - } - filebody, err := f.createTarFileBody() - if err != nil { - return tarFiles, err - } - - sec, err := f.GetInt() - if err != nil { - return tarFiles, err - } - nsec, err := f.GetInt() - if err != nil { - return tarFiles, err - } - - hdr := &tar.Header{ - Name: filename, - Size: int64(len(filebody)), - Mode: 0o600, - ModTime: time.Unix(int64(sec), int64(nsec)), - } - if err := setTarHeaderTypeflag(hdr, f); err != nil { - hdr.Typeflag = tar.TypeReg - } - if err := setTarHeaderFormat(hdr, f); err != nil { - return tarFiles, err // should not happend - } - tf := &TarFile{ - Hdr: hdr, - Body: filebody, - } - tarFiles = append(tarFiles, tf) - } - return tarFiles, nil -} - -// CreateFiles creates pseudo-random files in rootDir. -// It creates subdirs and places the files there. -// It is the callers responsibility to ensure that -// rootDir exists. -func (f *ConsumeFuzzer) CreateFiles(rootDir string) error { - numberOfFiles, err := f.GetInt() - if err != nil { - return err - } - maxNumberOfFiles := numberOfFiles % 4000 // This is completely arbitrary - if maxNumberOfFiles == 0 { - return errors.New("maxNumberOfFiles is nil") - } - - var noOfCreatedFiles int - for i := 0; i < maxNumberOfFiles; i++ { - // The file to create: - fileName, err := f.GetString() - if err != nil { - if noOfCreatedFiles > 0 { - // If files have been created, we don't return an error. - break - } else { - return errors.New("could not get fileName") - } - } - if strings.Contains(fileName, "..") || (len(fileName) > 0 && fileName[0] == 47) || strings.Contains(fileName, "\\") { - continue - } - fullFilePath := filepath.Join(rootDir, fileName) - - // Find the subdirectory of the file - if subDir := filepath.Dir(fileName); subDir != "" && subDir != "." 
{ - // create the dir first; avoid going outside the root dir - if strings.Contains(subDir, "../") || (len(subDir) > 0 && subDir[0] == 47) || strings.Contains(subDir, "\\") { - continue - } - dirPath := filepath.Join(rootDir, subDir) - if _, err := os.Stat(dirPath); os.IsNotExist(err) { - err2 := os.MkdirAll(dirPath, 0o777) - if err2 != nil { - continue - } - } - fullFilePath = filepath.Join(dirPath, fileName) - } else { - // Create symlink - createSymlink, err := f.GetBool() - if err != nil { - if noOfCreatedFiles > 0 { - break - } else { - return errors.New("could not create the symlink") - } - } - if createSymlink { - symlinkTarget, err := f.GetString() - if err != nil { - return err - } - err = os.Symlink(symlinkTarget, fullFilePath) - if err != nil { - return err - } - // stop loop here, since a symlink needs no further action - noOfCreatedFiles++ - continue - } - // We create a normal file - fileContents, err := f.GetBytes() - if err != nil { - if noOfCreatedFiles > 0 { - break - } else { - return errors.New("could not create the file") - } - } - err = os.WriteFile(fullFilePath, fileContents, 0o666) - if err != nil { - continue - } - noOfCreatedFiles++ - } - } - return nil -} - -// GetStringFrom returns a string that can only consist of characters -// included in possibleChars. It returns an error if the created string -// does not have the specified length. -func (f *ConsumeFuzzer) GetStringFrom(possibleChars string, length int) (string, error) { - if (f.dataTotal - f.position) < uint32(length) { - return "", errors.New("not enough bytes to create a string") - } - output := make([]byte, 0, length) - for i := 0; i < length; i++ { - charIndex, err := f.GetInt() - if err != nil { - return string(output), err - } - output = append(output, possibleChars[charIndex%len(possibleChars)]) - } - return string(output), nil -} - -func (f *ConsumeFuzzer) GetRune() ([]rune, error) { - stringToConvert, err := f.GetString() - if err != nil { - return []rune("nil"), err - } - return []rune(stringToConvert), nil -} - -func (f *ConsumeFuzzer) GetFloat32() (float32, error) { - u32, err := f.GetNBytes(4) - if err != nil { - return 0, err - } - littleEndian, err := f.GetBool() - if err != nil { - return 0, err - } - if littleEndian { - u32LE := binary.LittleEndian.Uint32(u32) - return math.Float32frombits(u32LE), nil - } - u32BE := binary.BigEndian.Uint32(u32) - return math.Float32frombits(u32BE), nil -} - -func (f *ConsumeFuzzer) GetFloat64() (float64, error) { - u64, err := f.GetNBytes(8) - if err != nil { - return 0, err - } - littleEndian, err := f.GetBool() - if err != nil { - return 0, err - } - if littleEndian { - u64LE := binary.LittleEndian.Uint64(u64) - return math.Float64frombits(u64LE), nil - } - u64BE := binary.BigEndian.Uint64(u64) - return math.Float64frombits(u64BE), nil -} - -func (f *ConsumeFuzzer) CreateSlice(targetSlice interface{}) error { - return f.GenerateStruct(targetSlice) -} diff --git a/src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go b/src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go deleted file mode 100644 index 8ca3a61b8..000000000 --- a/src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/funcs.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2023 The go-fuzz-headers Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gofuzzheaders - -import ( - "fmt" - "reflect" -) - -type Continue struct { - F *ConsumeFuzzer -} - -func (f *ConsumeFuzzer) AddFuncs(fuzzFuncs []interface{}) { - for i := range fuzzFuncs { - v := reflect.ValueOf(fuzzFuncs[i]) - if v.Kind() != reflect.Func { - panic("Need only funcs!") - } - t := v.Type() - if t.NumIn() != 2 || t.NumOut() != 1 { - fmt.Println(t.NumIn(), t.NumOut()) - - panic("Need 2 in and 1 out params. In must be the type. Out must be an error") - } - argT := t.In(0) - switch argT.Kind() { - case reflect.Ptr, reflect.Map: - default: - panic("fuzzFunc must take pointer or map type") - } - if t.In(1) != reflect.TypeOf(Continue{}) { - panic("fuzzFunc's second parameter must be type Continue") - } - f.Funcs[argT] = v - } -} - -func (f *ConsumeFuzzer) GenerateWithCustom(targetStruct interface{}) error { - e := reflect.ValueOf(targetStruct).Elem() - return f.fuzzStruct(e, true) -} - -func (c Continue) GenerateStruct(targetStruct interface{}) error { - return c.F.GenerateStruct(targetStruct) -} - -func (c Continue) GenerateStructWithCustom(targetStruct interface{}) error { - return c.F.GenerateWithCustom(targetStruct) -} diff --git a/src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go b/src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go deleted file mode 100644 index 2afd49f84..000000000 --- a/src/cmd/linuxkit/vendor/github.com/AdaLogics/go-fuzz-headers/sql.go +++ /dev/null @@ -1,556 +0,0 @@ -// Copyright 2023 The go-fuzz-headers Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gofuzzheaders - -import ( - "fmt" - "strings" -) - -// returns a keyword by index -func getKeyword(f *ConsumeFuzzer) (string, error) { - index, err := f.GetInt() - if err != nil { - return keywords[0], err - } - for i, k := range keywords { - if i == index { - return k, nil - } - } - return keywords[0], fmt.Errorf("could not get a kw") -} - -// Simple utility function to check if a string -// slice contains a string. 
-func containsString(s []string, e string) bool { - for _, a := range s { - if a == e { - return true - } - } - return false -} - -// These keywords are used specifically for fuzzing Vitess -var keywords = []string{ - "accessible", "action", "add", "after", "against", "algorithm", - "all", "alter", "always", "analyze", "and", "as", "asc", "asensitive", - "auto_increment", "avg_row_length", "before", "begin", "between", - "bigint", "binary", "_binary", "_utf8mb4", "_utf8", "_latin1", "bit", - "blob", "bool", "boolean", "both", "by", "call", "cancel", "cascade", - "cascaded", "case", "cast", "channel", "change", "char", "character", - "charset", "check", "checksum", "coalesce", "code", "collate", "collation", - "column", "columns", "comment", "committed", "commit", "compact", "complete", - "compressed", "compression", "condition", "connection", "constraint", "continue", - "convert", "copy", "cume_dist", "substr", "substring", "create", "cross", - "csv", "current_date", "current_time", "current_timestamp", "current_user", - "cursor", "data", "database", "databases", "day", "day_hour", "day_microsecond", - "day_minute", "day_second", "date", "datetime", "dec", "decimal", "declare", - "default", "definer", "delay_key_write", "delayed", "delete", "dense_rank", - "desc", "describe", "deterministic", "directory", "disable", "discard", - "disk", "distinct", "distinctrow", "div", "double", "do", "drop", "dumpfile", - "duplicate", "dynamic", "each", "else", "elseif", "empty", "enable", - "enclosed", "encryption", "end", "enforced", "engine", "engines", "enum", - "error", "escape", "escaped", "event", "exchange", "exclusive", "exists", - "exit", "explain", "expansion", "export", "extended", "extract", "false", - "fetch", "fields", "first", "first_value", "fixed", "float", "float4", - "float8", "flush", "for", "force", "foreign", "format", "from", "full", - "fulltext", "function", "general", "generated", "geometry", "geometrycollection", - "get", "global", "gtid_executed", "grant", "group", "grouping", "groups", - "group_concat", "having", "header", "high_priority", "hosts", "hour", "hour_microsecond", - "hour_minute", "hour_second", "if", "ignore", "import", "in", "index", "indexes", - "infile", "inout", "inner", "inplace", "insensitive", "insert", "insert_method", - "int", "int1", "int2", "int3", "int4", "int8", "integer", "interval", - "into", "io_after_gtids", "is", "isolation", "iterate", "invoker", "join", - "json", "json_table", "key", "keys", "keyspaces", "key_block_size", "kill", "lag", - "language", "last", "last_value", "last_insert_id", "lateral", "lead", "leading", - "leave", "left", "less", "level", "like", "limit", "linear", "lines", - "linestring", "load", "local", "localtime", "localtimestamp", "lock", "logs", - "long", "longblob", "longtext", "loop", "low_priority", "manifest", - "master_bind", "match", "max_rows", "maxvalue", "mediumblob", "mediumint", - "mediumtext", "memory", "merge", "microsecond", "middleint", "min_rows", "minute", - "minute_microsecond", "minute_second", "mod", "mode", "modify", "modifies", - "multilinestring", "multipoint", "multipolygon", "month", "name", - "names", "natural", "nchar", "next", "no", "none", "not", "no_write_to_binlog", - "nth_value", "ntile", "null", "numeric", "of", "off", "offset", "on", - "only", "open", "optimize", "optimizer_costs", "option", "optionally", - "or", "order", "out", "outer", "outfile", "over", "overwrite", "pack_keys", - "parser", "partition", "partitioning", "password", "percent_rank", "plugins", - "point", "polygon", 
"precision", "primary", "privileges", "processlist", - "procedure", "query", "quarter", "range", "rank", "read", "reads", "read_write", - "real", "rebuild", "recursive", "redundant", "references", "regexp", "relay", - "release", "remove", "rename", "reorganize", "repair", "repeat", "repeatable", - "replace", "require", "resignal", "restrict", "return", "retry", "revert", - "revoke", "right", "rlike", "rollback", "row", "row_format", "row_number", - "rows", "s3", "savepoint", "schema", "schemas", "second", "second_microsecond", - "security", "select", "sensitive", "separator", "sequence", "serializable", - "session", "set", "share", "shared", "show", "signal", "signed", "slow", - "smallint", "spatial", "specific", "sql", "sqlexception", "sqlstate", - "sqlwarning", "sql_big_result", "sql_cache", "sql_calc_found_rows", - "sql_no_cache", "sql_small_result", "ssl", "start", "starting", - "stats_auto_recalc", "stats_persistent", "stats_sample_pages", "status", - "storage", "stored", "straight_join", "stream", "system", "vstream", - "table", "tables", "tablespace", "temporary", "temptable", "terminated", - "text", "than", "then", "time", "timestamp", "timestampadd", "timestampdiff", - "tinyblob", "tinyint", "tinytext", "to", "trailing", "transaction", "tree", - "traditional", "trigger", "triggers", "true", "truncate", "uncommitted", - "undefined", "undo", "union", "unique", "unlock", "unsigned", "update", - "upgrade", "usage", "use", "user", "user_resources", "using", "utc_date", - "utc_time", "utc_timestamp", "validation", "values", "variables", "varbinary", - "varchar", "varcharacter", "varying", "vgtid_executed", "virtual", "vindex", - "vindexes", "view", "vitess", "vitess_keyspaces", "vitess_metadata", - "vitess_migration", "vitess_migrations", "vitess_replication_status", - "vitess_shards", "vitess_tablets", "vschema", "warnings", "when", - "where", "while", "window", "with", "without", "work", "write", "xor", - "year", "year_month", "zerofill", -} - -// Keywords that could get an additional keyword -var needCustomString = []string{ - "DISTINCTROW", "FROM", // Select keywords: - "GROUP BY", "HAVING", "WINDOW", - "FOR", - "ORDER BY", "LIMIT", - "INTO", "PARTITION", "AS", // Insert Keywords: - "ON DUPLICATE KEY UPDATE", - "WHERE", "LIMIT", // Delete keywords - "INFILE", "INTO TABLE", "CHARACTER SET", // Load keywords - "TERMINATED BY", "ENCLOSED BY", - "ESCAPED BY", "STARTING BY", - "TERMINATED BY", "STARTING BY", - "IGNORE", - "VALUE", "VALUES", // Replace tokens - "SET", // Update tokens - "ENGINE =", // Drop tokens - "DEFINER =", "ON SCHEDULE", "RENAME TO", // Alter tokens - "COMMENT", "DO", "INITIAL_SIZE = ", "OPTIONS", -} - -var alterTableTokens = [][]string{ - {"CUSTOM_FUZZ_STRING"}, - {"CUSTOM_ALTTER_TABLE_OPTIONS"}, - {"PARTITION_OPTIONS_FOR_ALTER_TABLE"}, -} - -var alterTokens = [][]string{ - { - "DATABASE", "SCHEMA", "DEFINER = ", "EVENT", "FUNCTION", "INSTANCE", - "LOGFILE GROUP", "PROCEDURE", "SERVER", - }, - {"CUSTOM_FUZZ_STRING"}, - { - "ON SCHEDULE", "ON COMPLETION PRESERVE", "ON COMPLETION NOT PRESERVE", - "ADD UNDOFILE", "OPTIONS", - }, - {"RENAME TO", "INITIAL_SIZE = "}, - {"ENABLE", "DISABLE", "DISABLE ON SLAVE", "ENGINE"}, - {"COMMENT"}, - {"DO"}, -} - -var setTokens = [][]string{ - {"CHARACTER SET", "CHARSET", "CUSTOM_FUZZ_STRING", "NAMES"}, - {"CUSTOM_FUZZ_STRING", "DEFAULT", "="}, - {"CUSTOM_FUZZ_STRING"}, -} - -var dropTokens = [][]string{ - {"TEMPORARY", "UNDO"}, - { - "DATABASE", "SCHEMA", "EVENT", "INDEX", "LOGFILE GROUP", - "PROCEDURE", "FUNCTION", "SERVER", 
"SPATIAL REFERENCE SYSTEM", - "TABLE", "TABLESPACE", "TRIGGER", "VIEW", - }, - {"IF EXISTS"}, - {"CUSTOM_FUZZ_STRING"}, - {"ON", "ENGINE = ", "RESTRICT", "CASCADE"}, -} - -var renameTokens = [][]string{ - {"TABLE"}, - {"CUSTOM_FUZZ_STRING"}, - {"TO"}, - {"CUSTOM_FUZZ_STRING"}, -} - -var truncateTokens = [][]string{ - {"TABLE"}, - {"CUSTOM_FUZZ_STRING"}, -} - -var createTokens = [][]string{ - {"OR REPLACE", "TEMPORARY", "UNDO"}, // For create spatial reference system - { - "UNIQUE", "FULLTEXT", "SPATIAL", "ALGORITHM = UNDEFINED", "ALGORITHM = MERGE", - "ALGORITHM = TEMPTABLE", - }, - { - "DATABASE", "SCHEMA", "EVENT", "FUNCTION", "INDEX", "LOGFILE GROUP", - "PROCEDURE", "SERVER", "SPATIAL REFERENCE SYSTEM", "TABLE", "TABLESPACE", - "TRIGGER", "VIEW", - }, - {"IF NOT EXISTS"}, - {"CUSTOM_FUZZ_STRING"}, -} - -/* -// For future use. -var updateTokens = [][]string{ - {"LOW_PRIORITY"}, - {"IGNORE"}, - {"SET"}, - {"WHERE"}, - {"ORDER BY"}, - {"LIMIT"}, -} -*/ - -var replaceTokens = [][]string{ - {"LOW_PRIORITY", "DELAYED"}, - {"INTO"}, - {"PARTITION"}, - {"CUSTOM_FUZZ_STRING"}, - {"VALUES", "VALUE"}, -} - -var loadTokens = [][]string{ - {"DATA"}, - {"LOW_PRIORITY", "CONCURRENT", "LOCAL"}, - {"INFILE"}, - {"REPLACE", "IGNORE"}, - {"INTO TABLE"}, - {"PARTITION"}, - {"CHARACTER SET"}, - {"FIELDS", "COLUMNS"}, - {"TERMINATED BY"}, - {"OPTIONALLY"}, - {"ENCLOSED BY"}, - {"ESCAPED BY"}, - {"LINES"}, - {"STARTING BY"}, - {"TERMINATED BY"}, - {"IGNORE"}, - {"LINES", "ROWS"}, - {"CUSTOM_FUZZ_STRING"}, -} - -// These Are everything that comes after "INSERT" -var insertTokens = [][]string{ - {"LOW_PRIORITY", "DELAYED", "HIGH_PRIORITY", "IGNORE"}, - {"INTO"}, - {"PARTITION"}, - {"CUSTOM_FUZZ_STRING"}, - {"AS"}, - {"ON DUPLICATE KEY UPDATE"}, -} - -// These are everything that comes after "SELECT" -var selectTokens = [][]string{ - {"*", "CUSTOM_FUZZ_STRING", "DISTINCTROW"}, - {"HIGH_PRIORITY"}, - {"STRAIGHT_JOIN"}, - {"SQL_SMALL_RESULT", "SQL_BIG_RESULT", "SQL_BUFFER_RESULT"}, - {"SQL_NO_CACHE", "SQL_CALC_FOUND_ROWS"}, - {"CUSTOM_FUZZ_STRING"}, - {"FROM"}, - {"WHERE"}, - {"GROUP BY"}, - {"HAVING"}, - {"WINDOW"}, - {"ORDER BY"}, - {"LIMIT"}, - {"CUSTOM_FUZZ_STRING"}, - {"FOR"}, -} - -// These are everything that comes after "DELETE" -var deleteTokens = [][]string{ - {"LOW_PRIORITY", "QUICK", "IGNORE", "FROM", "AS"}, - {"PARTITION"}, - {"WHERE"}, - {"ORDER BY"}, - {"LIMIT"}, -} - -var alter_table_options = []string{ - "ADD", "COLUMN", "FIRST", "AFTER", "INDEX", "KEY", "FULLTEXT", "SPATIAL", - "CONSTRAINT", "UNIQUE", "FOREIGN KEY", "CHECK", "ENFORCED", "DROP", "ALTER", - "NOT", "INPLACE", "COPY", "SET", "VISIBLE", "INVISIBLE", "DEFAULT", "CHANGE", - "CHARACTER SET", "COLLATE", "DISABLE", "ENABLE", "KEYS", "TABLESPACE", "LOCK", - "FORCE", "MODIFY", "SHARED", "EXCLUSIVE", "NONE", "ORDER BY", "RENAME COLUMN", - "AS", "=", "ASC", "DESC", "WITH", "WITHOUT", "VALIDATION", "ADD PARTITION", - "DROP PARTITION", "DISCARD PARTITION", "IMPORT PARTITION", "TRUNCATE PARTITION", - "COALESCE PARTITION", "REORGANIZE PARTITION", "EXCHANGE PARTITION", - "ANALYZE PARTITION", "CHECK PARTITION", "OPTIMIZE PARTITION", "REBUILD PARTITION", - "REPAIR PARTITION", "REMOVE PARTITIONING", "USING", "BTREE", "HASH", "COMMENT", - "KEY_BLOCK_SIZE", "WITH PARSER", "AUTOEXTEND_SIZE", "AUTO_INCREMENT", "AVG_ROW_LENGTH", - "CHECKSUM", "INSERT_METHOD", "ROW_FORMAT", "DYNAMIC", "FIXED", "COMPRESSED", "REDUNDANT", - "COMPACT", "SECONDARY_ENGINE_ATTRIBUTE", "STATS_AUTO_RECALC", "STATS_PERSISTENT", - "STATS_SAMPLE_PAGES", "ZLIB", "LZ4", 
"ENGINE_ATTRIBUTE", "KEY_BLOCK_SIZE", "MAX_ROWS", - "MIN_ROWS", "PACK_KEYS", "PASSWORD", "COMPRESSION", "CONNECTION", "DIRECTORY", - "DELAY_KEY_WRITE", "ENCRYPTION", "STORAGE", "DISK", "MEMORY", "UNION", -} - -// Creates an 'alter table' statement. 'alter table' is an exception -// in that it has its own function. The majority of statements -// are created by 'createStmt()'. -func createAlterTableStmt(f *ConsumeFuzzer) (string, error) { - maxArgs, err := f.GetInt() - if err != nil { - return "", err - } - maxArgs = maxArgs % 30 - if maxArgs == 0 { - return "", fmt.Errorf("could not create alter table stmt") - } - - var stmt strings.Builder - stmt.WriteString("ALTER TABLE ") - for i := 0; i < maxArgs; i++ { - // Calculate if we get existing token or custom string - tokenType, err := f.GetInt() - if err != nil { - return "", err - } - if tokenType%4 == 1 { - customString, err := f.GetString() - if err != nil { - return "", err - } - stmt.WriteString(" " + customString) - } else { - tokenIndex, err := f.GetInt() - if err != nil { - return "", err - } - stmt.WriteString(" " + alter_table_options[tokenIndex%len(alter_table_options)]) - } - } - return stmt.String(), nil -} - -func chooseToken(tokens []string, f *ConsumeFuzzer) (string, error) { - index, err := f.GetInt() - if err != nil { - return "", err - } - var token strings.Builder - token.WriteString(tokens[index%len(tokens)]) - if token.String() == "CUSTOM_FUZZ_STRING" { - customFuzzString, err := f.GetString() - if err != nil { - return "", err - } - return customFuzzString, nil - } - - // Check if token requires an argument - if containsString(needCustomString, token.String()) { - customFuzzString, err := f.GetString() - if err != nil { - return "", err - } - token.WriteString(" " + customFuzzString) - } - return token.String(), nil -} - -var stmtTypes = map[string][][]string{ - "DELETE": deleteTokens, - "INSERT": insertTokens, - "SELECT": selectTokens, - "LOAD": loadTokens, - "REPLACE": replaceTokens, - "CREATE": createTokens, - "DROP": dropTokens, - "RENAME": renameTokens, - "TRUNCATE": truncateTokens, - "SET": setTokens, - "ALTER": alterTokens, - "ALTER TABLE": alterTableTokens, // ALTER TABLE has its own set of tokens -} - -var stmtTypeEnum = map[int]string{ - 0: "DELETE", - 1: "INSERT", - 2: "SELECT", - 3: "LOAD", - 4: "REPLACE", - 5: "CREATE", - 6: "DROP", - 7: "RENAME", - 8: "TRUNCATE", - 9: "SET", - 10: "ALTER", - 11: "ALTER TABLE", -} - -func createStmt(f *ConsumeFuzzer) (string, error) { - stmtIndex, err := f.GetInt() - if err != nil { - return "", err - } - stmtIndex = stmtIndex % len(stmtTypes) - - queryType := stmtTypeEnum[stmtIndex] - tokens := stmtTypes[queryType] - - // We have custom creator for ALTER TABLE - if queryType == "ALTER TABLE" { - query, err := createAlterTableStmt(f) - if err != nil { - return "", err - } - return query, nil - } - - // Here we are creating a query that is not - // an 'alter table' query. For available - // queries, see "stmtTypes" - - // First specify the first query keyword: - var query strings.Builder - query.WriteString(queryType) - - // Next create the args for the - queryArgs, err := createStmtArgs(tokens, f) - if err != nil { - return "", err - } - query.WriteString(" " + queryArgs) - return query.String(), nil -} - -// Creates the arguments of a statements. In a select statement -// that would be everything after "select". 
-func createStmtArgs(tokenslice [][]string, f *ConsumeFuzzer) (string, error) { - var query, token strings.Builder - - // We go through the tokens in the tokenslice, - // create the respective token and add it to - // "query" - for _, tokens := range tokenslice { - // For extra randomization, the fuzzer can - // choose to not include this token. - includeThisToken, err := f.GetBool() - if err != nil { - return "", err - } - if !includeThisToken { - continue - } - - // There may be several tokens to choose from: - if len(tokens) > 1 { - chosenToken, err := chooseToken(tokens, f) - if err != nil { - return "", err - } - query.WriteString(" " + chosenToken) - } else { - token.WriteString(tokens[0]) - - // In case the token is "CUSTOM_FUZZ_STRING" - // we will then create a non-structured string - if token.String() == "CUSTOM_FUZZ_STRING" { - customFuzzString, err := f.GetString() - if err != nil { - return "", err - } - query.WriteString(" " + customFuzzString) - continue - } - - // Check if token requires an argument. - // Tokens that take an argument can be found - // in 'needCustomString'. If so, we add a - // non-structured string to the token. - if containsString(needCustomString, token.String()) { - customFuzzString, err := f.GetString() - if err != nil { - return "", err - } - token.WriteString(fmt.Sprintf(" %s", customFuzzString)) - } - query.WriteString(fmt.Sprintf(" %s", token.String())) - } - } - return query.String(), nil -} - -// Creates a semi-structured query. It creates a string -// that is a combination of the keywords and random strings. -func createQuery(f *ConsumeFuzzer) (string, error) { - queryLen, err := f.GetInt() - if err != nil { - return "", err - } - maxLen := queryLen % 60 - if maxLen == 0 { - return "", fmt.Errorf("could not create a query") - } - var query strings.Builder - for i := 0; i < maxLen; i++ { - // Get a new token: - useKeyword, err := f.GetBool() - if err != nil { - return "", err - } - if useKeyword { - keyword, err := getKeyword(f) - if err != nil { - return "", err - } - query.WriteString(" " + keyword) - } else { - customString, err := f.GetString() - if err != nil { - return "", err - } - query.WriteString(" " + customString) - } - } - if query.String() == "" { - return "", fmt.Errorf("could not create a query") - } - return query.String(), nil -} - -// GetSQLString is the API that users interact with. -// -// Usage: -// -// f := NewConsumer(data) -// sqlString, err := f.GetSQLString() -func (f *ConsumeFuzzer) GetSQLString() (string, error) { - var query string - veryStructured, err := f.GetBool() - if err != nil { - return "", err - } - if veryStructured { - query, err = createStmt(f) - if err != nil { - return "", err - } - } else { - query, err = createQuery(f) - if err != nil { - return "", err - } - } - return query, nil -} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/console/console_other.go b/src/cmd/linuxkit/vendor/github.com/containerd/console/console_other.go index 933dfaddd..968c5771c 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/console/console_other.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/console/console_other.go @@ -1,5 +1,5 @@ -//go:build !darwin && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos -// +build !darwin,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos +//go:build !darwin && !freebsd && !linux && !netbsd && !openbsd && !windows && !zos +// +build !darwin,!freebsd,!linux,!netbsd,!openbsd,!windows,!zos /* Copyright The containerd Authors. 
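For context on the removal above: the deleted go-fuzz-headers sources expose a `ConsumeFuzzer` that turns the raw fuzz input into typed values. A minimal, illustrative harness is sketched below; `NewConsumer`, `GenerateStruct` and `GetSQLString` are taken from the deleted sources, while the `config` struct, package name and harness wiring are hypothetical and only follow the go-fuzz convention (`func Fuzz(data []byte) int`) mentioned in the removed README.

```go
// Illustrative sketch only; not part of this diff.
package fuzzer

import (
	fuzz "github.com/AdaLogics/go-fuzz-headers"
)

// config is a hypothetical target struct to be populated from fuzz input.
type config struct {
	Name  string
	Count int
}

func Fuzz(data []byte) int {
	// Wrap the engine-provided bytes in a consumer.
	f := fuzz.NewConsumer(data)

	// Fill the struct with values derived from the input bytes.
	var c config
	if err := f.GenerateStruct(&c); err != nil {
		return 0
	}

	// Derive a semi-structured SQL string from the remaining bytes.
	if _, err := f.GetSQLString(); err != nil {
		return 0
	}
	return 1
}
```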
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/console/console_unix.go b/src/cmd/linuxkit/vendor/github.com/containerd/console/console_unix.go index 161f5d126..aa4c69623 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/console/console_unix.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/console/console_unix.go @@ -31,6 +31,15 @@ func NewPty() (Console, string, error) { if err != nil { return nil, "", err } + return NewPtyFromFile(f) +} + +// NewPtyFromFile creates a new pty pair, just like [NewPty] except that the +// provided [os.File] is used as the master rather than automatically creating +// a new master from /dev/ptmx. The ownership of [os.File] is passed to the +// returned [Console], so the caller must be careful to not call Close on the +// underlying file. +func NewPtyFromFile(f File) (Console, string, error) { slave, err := ptsname(f) if err != nil { return nil, "", err diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_darwin.go b/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_darwin.go index 787154580..77c695a40 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_darwin.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_darwin.go @@ -18,7 +18,6 @@ package console import ( "fmt" - "os" "golang.org/x/sys/unix" ) @@ -30,12 +29,12 @@ const ( // unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. // unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { +func unlockpt(f File) error { return unix.IoctlSetPointerInt(int(f.Fd()), unix.TIOCPTYUNLK, 0) } // ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { +func ptsname(f File) (string, error) { n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCPTYGNAME) if err != nil { return "", err diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_freebsd_cgo.go b/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_freebsd_cgo.go index 332825794..627f7d55a 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_freebsd_cgo.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_freebsd_cgo.go @@ -21,7 +21,6 @@ package console import ( "fmt" - "os" "golang.org/x/sys/unix" ) @@ -39,7 +38,7 @@ const ( // unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. // unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { +func unlockpt(f File) error { fd := C.int(f.Fd()) if _, err := C.unlockpt(fd); err != nil { C.close(fd) @@ -49,7 +48,7 @@ func unlockpt(f *os.File) error { } // ptsname retrieves the name of the first available pts for the given master. 
-func ptsname(f *os.File) (string, error) { +func ptsname(f File) (string, error) { n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) if err != nil { return "", err diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_freebsd_nocgo.go b/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_freebsd_nocgo.go index 18a9b9cbe..434ba46ef 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_freebsd_nocgo.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_freebsd_nocgo.go @@ -21,7 +21,6 @@ package console import ( "fmt" - "os" "golang.org/x/sys/unix" ) @@ -42,12 +41,12 @@ const ( // unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. // unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { +func unlockpt(f File) error { panic("unlockpt() support requires cgo.") } // ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { +func ptsname(f File) (string, error) { n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) if err != nil { return "", err diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_linux.go b/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_linux.go index 7d552ea4b..e98dc022d 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_linux.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_linux.go @@ -18,7 +18,6 @@ package console import ( "fmt" - "os" "unsafe" "golang.org/x/sys/unix" @@ -31,7 +30,7 @@ const ( // unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. // unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { +func unlockpt(f File) error { var u int32 // XXX do not use unix.IoctlSetPointerInt here, see commit dbd69c59b81. if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))); err != 0 { @@ -41,7 +40,7 @@ func unlockpt(f *os.File) error { } // ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { +func ptsname(f File) (string, error) { var u uint32 // XXX do not use unix.IoctlGetInt here, see commit dbd69c59b81. if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&u))); err != 0 { diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_netbsd.go b/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_netbsd.go index 71227aefd..73cf43977 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_netbsd.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_netbsd.go @@ -18,7 +18,6 @@ package console import ( "bytes" - "os" "golang.org/x/sys/unix" ) @@ -31,12 +30,12 @@ const ( // unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. // unlockpt should be called before opening the slave side of a pty. // This does not exist on NetBSD, it does not allocate controlling terminals on open -func unlockpt(f *os.File) error { +func unlockpt(f File) error { return nil } // ptsname retrieves the name of the first available pts for the given master. 
-func ptsname(f *os.File) (string, error) { +func ptsname(f File) (string, error) { ptm, err := unix.IoctlGetPtmget(int(f.Fd()), unix.TIOCPTSNAME) if err != nil { return "", err diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_openbsd_cgo.go b/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_openbsd_cgo.go index 0e76f6cc3..46f4250c4 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_openbsd_cgo.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_openbsd_cgo.go @@ -20,8 +20,6 @@ package console import ( - "os" - "golang.org/x/sys/unix" ) @@ -34,7 +32,7 @@ const ( ) // ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { +func ptsname(f File) (string, error) { ptspath, err := C.ptsname(C.int(f.Fd())) if err != nil { return "", err @@ -44,7 +42,7 @@ func ptsname(f *os.File) (string, error) { // unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. // unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { +func unlockpt(f File) error { if _, err := C.grantpt(C.int(f.Fd())); err != nil { return err } diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_openbsd_nocgo.go b/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_openbsd_nocgo.go index dca92418b..a8f9f6c25 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_openbsd_nocgo.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_openbsd_nocgo.go @@ -29,8 +29,6 @@ package console import ( - "os" - "golang.org/x/sys/unix" ) @@ -39,10 +37,10 @@ const ( cmdTcSet = unix.TIOCSETA ) -func ptsname(f *os.File) (string, error) { +func ptsname(f File) (string, error) { panic("ptsname() support requires cgo.") } -func unlockpt(f *os.File) error { +func unlockpt(f File) error { panic("unlockpt() support requires cgo.") } diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_zos.go b/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_zos.go index fc90ba5fb..23b0bd282 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_zos.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/console/tc_zos.go @@ -17,7 +17,6 @@ package console import ( - "os" "strings" "golang.org/x/sys/unix" @@ -29,11 +28,11 @@ const ( ) // unlockpt is a no-op on zos. -func unlockpt(_ *os.File) error { +func unlockpt(File) error { return nil } // ptsname retrieves the name of the first available pts for the given master. 
-func ptsname(f *os.File) (string, error) { +func ptsname(f File) (string, error) { return "/dev/ttyp" + strings.TrimPrefix(f.Name(), "/dev/ptyp"), nil } diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/content/helpers.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/content/helpers.go index 74cb566b0..749e65311 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/content/helpers.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/content/helpers.go @@ -200,7 +200,7 @@ func Copy(ctx context.Context, cw Writer, or io.Reader, size int64, expected dig } if size != 0 && copied < size-ws.Offset { // Short writes would return its own error, this indicates a read failure - return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF) + return fmt.Errorf("short read: expected %d bytes but got %d: %w", size-ws.Offset, copied, io.ErrUnexpectedEOF) } if err := cw.Commit(ctx, size, expected, opts...); err != nil { if errors.Is(err, ErrReset) { diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/content/proxy/content_writer.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/content/proxy/content_writer.go index 214a0a335..45718be7d 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/content/proxy/content_writer.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/content/proxy/content_writer.go @@ -20,12 +20,14 @@ import ( "context" "fmt" "io" + "slices" contentapi "github.com/containerd/containerd/api/services/content/v1" "github.com/containerd/errdefs/pkg/errgrpc" digest "github.com/opencontainers/go-digest" "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/defaults" "github.com/containerd/containerd/v2/pkg/protobuf" ) @@ -76,27 +78,31 @@ func (rw *remoteWriter) Digest() digest.Digest { } func (rw *remoteWriter) Write(p []byte) (n int, err error) { - offset := rw.offset + const maxBufferSize = defaults.DefaultMaxSendMsgSize >> 1 + for data := range slices.Chunk(p, maxBufferSize) { + offset := rw.offset - resp, err := rw.send(&contentapi.WriteContentRequest{ - Action: contentapi.WriteAction_WRITE, - Offset: offset, - Data: p, - }) - if err != nil { - return 0, fmt.Errorf("failed to send write: %w", errgrpc.ToNative(err)) - } + resp, err := rw.send(&contentapi.WriteContentRequest{ + Action: contentapi.WriteAction_WRITE, + Offset: offset, + Data: data, + }) + if err != nil { + return 0, fmt.Errorf("failed to send write: %w", errgrpc.ToNative(err)) + } - n = int(resp.Offset - offset) - if n < len(p) { - err = io.ErrShortWrite - } + written := int(resp.Offset - offset) + rw.offset += int64(written) + if resp.Digest != "" { + rw.digest = digest.Digest(resp.Digest) + } + n += written - rw.offset += int64(n) - if resp.Digest != "" { - rw.digest = digest.Digest(resp.Digest) + if written < len(data) { + return n, io.ErrShortWrite + } } - return + return n, nil } func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) (err error) { diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/handlers.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/handlers.go index 0487fdebd..30543484c 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/handlers.go +++ 
b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/handlers.go @@ -154,8 +154,6 @@ func WalkNotEmpty(ctx context.Context, handler Handler, descs ...ocispec.Descrip func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error { eg, ctx2 := errgroup.WithContext(ctx) for _, desc := range descs { - desc := desc - if limiter != nil { if err := limiter.Acquire(ctx, 1); err != nil { return err diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/image.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/image.go index 6bc106aac..9fcce9b4e 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/image.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/image.go @@ -369,8 +369,8 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr } return append([]ocispec.Descriptor{}, index.Manifests...), nil - } else if !IsLayerType(desc.MediaType) && !IsKnownConfig(desc.MediaType) { - // Layers and configs are childless data types and should not be logged. + } else if !IsLayerType(desc.MediaType) && !IsKnownConfig(desc.MediaType) && !IsAttestationType(desc.MediaType) { + // Layers, configs, and attestations are childless data types and should not be logged. log.G(ctx).Debugf("encountered unknown type %v; children may not be fetched", desc.MediaType) } return nil, nil diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/labels.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/labels.go deleted file mode 100644 index 06dfed572..000000000 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/labels.go +++ /dev/null @@ -1,21 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package images - -const ( - ConvertedDockerSchema1LabelKey = "io.containerd.image/converted-docker-schema1" -) diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/mediatypes.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/mediatypes.go index d2e845b16..0c8600d86 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/mediatypes.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/images/mediatypes.go @@ -58,6 +58,9 @@ const ( MediaTypeImageLayerEncrypted = ocispec.MediaTypeImageLayer + "+encrypted" MediaTypeImageLayerGzipEncrypted = ocispec.MediaTypeImageLayerGzip + "+encrypted" + + // In-toto attestation + MediaTypeInToto = "application/vnd.in-toto+json" ) // DiffCompression returns the compression as defined by the layer diff media @@ -193,6 +196,16 @@ func IsKnownConfig(mt string) bool { return false } +// IsAttestationType returns true if the media type is an attestation type +func IsAttestationType(mt string) bool { + switch mt { + case MediaTypeInToto: + return true + default: + return false + } +} + // ChildGCLabels returns the label for a given descriptor to reference it func ChildGCLabels(desc ocispec.Descriptor) []string { mt := desc.MediaType diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/auth/fetch.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/auth/fetch.go index 16ea609a1..f68c22ab7 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/auth/fetch.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/auth/fetch.go @@ -78,10 +78,10 @@ type TokenOptions struct { // FetchRefreshToken enables fetching a refresh token (aka "identity token", "offline token") along with the bearer token. // // For HTTP GET mode (FetchToken), FetchRefreshToken sets `offline_token=true` in the request. - // https://docs.docker.com/registry/spec/auth/token/#requesting-a-token + // https://distribution.github.io/distribution/spec/auth/token/#requesting-a-token // // For HTTP POST mode (FetchTokenWithOAuth), FetchRefreshToken sets `access_type=offline` in the request. - // https://docs.docker.com/registry/spec/auth/oauth/#getting-a-token + // https://distribution.github.io/distribution/spec/auth/oauth/#getting-a-token FetchRefreshToken bool } diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/authorizer.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/authorizer.go index 01fc792fa..00d1b3411 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/authorizer.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/authorizer.go @@ -70,9 +70,18 @@ func WithAuthCreds(creds func(string) (string, string, error)) AuthorizerOpt { } // WithAuthHeader provides HTTP headers for authorization +// +// We need to merge instead of replacing because header may be set by +// a per-host hosts.toml or/AND by a global header config (e.g., cri.config.headers) func WithAuthHeader(hdr http.Header) AuthorizerOpt { return func(opt *authorizerConfig) { - opt.header = hdr + if opt.header == nil { + opt.header = hdr.Clone() + } else { + for k, v := range hdr { + opt.header[k] = append(opt.header[k], v...) 
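// A minimal illustrative sketch (not part of the vendored change; the header
// name and values below are made up) of what the merge in WithAuthHeader buys:
// applying the option twice, e.g. once from a global config and once from a
// per-host hosts.toml, keeps both values instead of letting the second call
// clobber the first.
//
//	global := http.Header{"X-Meta": {"from-config"}}
//	perHost := http.Header{"X-Meta": {"from-hosts-toml"}}
//	_ = NewDockerAuthorizer(WithAuthHeader(global), WithAuthHeader(perHost))
//	// the authorizer's header now carries X-Meta: [from-config from-hosts-toml]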
+ } + } } } @@ -88,7 +97,7 @@ func WithFetchRefreshToken(f OnFetchRefreshToken) AuthorizerOpt { // NewDockerAuthorizer creates an authorizer using Docker's registry // authentication spec. -// See https://docs.docker.com/registry/spec/auth/ +// See https://distribution.github.io/distribution/spec/auth/ func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer { var ao authorizerConfig for _, opt := range opts { @@ -269,7 +278,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st to.Scopes = GetTokenScopes(ctx, to.Scopes) - // Docs: https://docs.docker.com/registry/spec/auth/scope + // Docs: https://distribution.github.io/distribution/spec/auth/scope/ scoped := strings.Join(to.Scopes, " ") // Keep track of the expiration time of cached bearer tokens so they can be diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/converter_fuzz.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/converter_fuzz.go deleted file mode 100644 index c97b6b899..000000000 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/converter_fuzz.go +++ /dev/null @@ -1,54 +0,0 @@ -//go:build gofuzz - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package docker - -import ( - "context" - "os" - - fuzz "github.com/AdaLogics/go-fuzz-headers" - "github.com/containerd/containerd/v2/plugins/content/local" - "github.com/containerd/log" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -func FuzzConvertManifest(data []byte) int { - ctx := context.Background() - - // Do not log the message below - // level=warning msg="do nothing for media type: ..." 
- log.G(ctx).Logger.SetLevel(log.PanicLevel) - - f := fuzz.NewConsumer(data) - desc := ocispec.Descriptor{} - err := f.GenerateStruct(&desc) - if err != nil { - return 0 - } - tmpdir, err := os.MkdirTemp("", "fuzzing-") - if err != nil { - return 0 - } - cs, err := local.NewStore(tmpdir) - if err != nil { - return 0 - } - _, _ = ConvertManifest(ctx, cs, desc) - return 1 -} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/errcode.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/errcode.go index 8c623bcbe..b62256f82 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/errcode.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/errcode.go @@ -18,8 +18,12 @@ package docker import ( "encoding/json" + "errors" "fmt" + "net/http" "strings" + + remoteerrors "github.com/containerd/containerd/v2/core/remotes/errors" ) // ErrorCoder is the base interface for ErrorCode and Error allowing @@ -281,3 +285,21 @@ func (errs *Errors) UnmarshalJSON(data []byte) error { *errs = newErrs return nil } + +func unexpectedResponseErr(resp *http.Response) (retErr error) { + retErr = remoteerrors.NewUnexpectedStatusErr(resp) + + // Decode registry error if provided + if rerr := retErr.(remoteerrors.ErrUnexpectedStatus); len(rerr.Body) > 0 { + var registryErr Errors + if err := json.Unmarshal(rerr.Body, ®istryErr); err == nil && registryErr.Len() > 0 { + // Join the unexpected error with the typed errors, when printed it will + // show the unexpected error message and the registry errors. The body + // is always excluded from the unexpected error message. This also allows + // clients to decode into either type. + retErr = errors.Join(rerr, registryErr) + } + } + + return +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher.go index 2c7d880a9..3e74df8cc 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher.go @@ -17,26 +17,202 @@ package docker import ( + "bytes" "compress/flate" "compress/gzip" "context" - "encoding/json" "errors" "fmt" "io" "net/http" "net/url" + "strconv" "strings" + "sync" - "github.com/containerd/containerd/v2/core/images" - "github.com/containerd/containerd/v2/core/remotes" "github.com/containerd/errdefs" "github.com/containerd/log" "github.com/klauspost/compress/zstd" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "github.com/containerd/containerd/v2/core/images" + "github.com/containerd/containerd/v2/core/remotes" ) +type bufferPool struct { + pool *sync.Pool +} + +func newbufferPool(bufCap int64) *bufferPool { + pool := &sync.Pool{ + New: func() any { + return bytes.NewBuffer(make([]byte, 0, bufCap)) + }, + } + return &bufferPool{ + pool: pool, + } +} + +func (p *bufferPool) Get() *bytes.Buffer { + buf := p.pool.Get().(*bytes.Buffer) + return buf +} + +func (p *bufferPool) Put(buffer *bytes.Buffer) { + p.pool.Put(buffer) +} + +var ErrClosedPipe = errors.New("bufpipe: read/write on closed pipe") + +// pipe implements an asynchronous buffered pipe designed for high-throughput +// I/O with configurable initial buffer sizes and buffer reuse. 
It decouples +// read/write operations, allowing writers to proceed without blocking (unless +// the pipe is closed) and readers to wait efficiently for incoming data. +// +// Key Characteristics: +// - Asynchronous Operation: Writers populate buffers independently of read +// timing, enabling continuous data flow without reader-writer synchronization. +// - Dynamic Buffering: Active buffer grows organically to handle large payloads, while +// the initial capacity (bufCap) balances memory pre-allocation and growth overhead. +// - Buffer Recycling: Retrieved from/pushed to a pool to minimize allocations, reducing +// garbage collection pressure in sustained I/O scenarios. +// - Error Semantics: Closes deterministically on first error (read or write), propagating +// errors atomically to both ends while draining buffered data. +// +// Difference with io.Pipe: +// - Unlike io.Pipe's strict synchronization (blocking write until read), this implementation +// allows writers to buffer data ahead of reads, improving throughput for bursty workloads. +// +// Synchronization & Internals: +// - Condition Variable (sync.Cond): Coordinates reader/writer, waking readers on new data +// or closure. Locking is centralized via the condition's mutex. +// - Buffer Lifecycle: Active buffer serves writes until read depletion, after which it's +// recycled to the pool. Pooled buffers retain their capacity across uses. +// - Error Handling: Write errors (werr) permanently fail writes; read errors (rerr) mark +// terminal read state after buffer exhaustion. +// +// Future Considerations: +// - Zero-copy reads/writes to avoid buffer copying overhead. +// - Memory-mapped file backing for multi-gigabyte payloads. +type pipe struct { + cond *sync.Cond // Coordinates read/write signaling via Lock+Wait/Signal + bufPool *bufferPool // Reusable buffers with initial capacity bufCap + buf *bytes.Buffer // Active data buffer (nil when empty/returned to pool) + rerr, werr error // Terminal read/write errors (sticky once set) +} + +type pipeReader struct { + *pipe +} + +type pipeWriter struct { + *pipe +} + +func newPipeWriter(bufPool *bufferPool) (*pipeReader, *pipeWriter) { + p := &pipe{ + cond: sync.NewCond(new(sync.Mutex)), + bufPool: bufPool, + buf: nil, + } + return &pipeReader{ + pipe: p, + }, &pipeWriter{ + pipe: p, + } +} + +// Read implements the standard Read interface: it reads data from the pipe, +// reading from the internal buffer, otherwise blocking until a writer arrives +// or the write end is closed. If the write end is closed with an error, that +// error is returned as err; otherwise err is io.EOF. +func (r *pipeReader) Read(data []byte) (n int, err error) { + r.cond.L.Lock() + defer r.cond.L.Unlock() + + if r.buf == nil { + r.buf = r.bufPool.Get() + } + + for { + n, err = r.buf.Read(data) + // If not closed and no read, wait for writing. + if err == io.EOF && r.rerr == nil && n == 0 { + r.cond.Wait() // Wait for data to be written + continue + } + break + } + if err == io.EOF { + // Put buffer back to pool + r.bufPool.Put(r.buf) + r.buf = nil + return n, r.rerr + } + return n, err +} + +// Close closes the reader; subsequent writes from the write half of the pipe +// will return error ErrClosedPipe. +func (r *pipeReader) Close() error { + return r.CloseWithError(nil) +} + +// CloseWithError closes the reader; subsequent writes to the write half of the +// pipe will return the error err. 
+func (r *pipeReader) CloseWithError(err error) error { + r.cond.L.Lock() + defer r.cond.L.Unlock() + + if err == nil { + err = ErrClosedPipe + } + r.werr = err + return nil +} + +// Write implements the standard Write interface: it writes data to the internal +// buffer. If the read end is closed with an error, that err is returned as err; +// otherwise err is ErrClosedPipe. +func (w *pipeWriter) Write(data []byte) (int, error) { + w.cond.L.Lock() + defer w.cond.L.Unlock() + + if w.werr != nil { + return 0, w.werr + } + + if w.buf == nil { + w.buf = w.bufPool.Get() + } + + n, err := w.buf.Write(data) + w.cond.Signal() + return n, err +} + +// Close closes the writer; subsequent reads from the read half of the pipe will +// return io.EOF once the internal buffer get empty. +func (w *pipeWriter) Close() error { + return w.CloseWithError(nil) +} + +// Close closes the writer; subsequent reads from the read half of the pipe will +// return err once the internal buffer get empty. +func (w *pipeWriter) CloseWithError(err error) error { + w.cond.L.Lock() + defer w.cond.L.Unlock() + + if err == nil { + err = io.EOF + } + w.rerr = err + w.cond.Broadcast() + return nil +} + type dockerFetcher struct { *dockerBase } @@ -84,7 +260,7 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R req.path = req.path + "?" + u.RawQuery } - rc, err := r.open(ctx, req, desc.MediaType, offset) + rc, err := r.open(ctx, req, desc.MediaType, offset, false) if err != nil { if errdefs.IsNotFound(err) { continue // try one of the other urls. @@ -97,17 +273,16 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R } // Try manifests endpoints for manifests types - if images.IsManifestType(desc.MediaType) || images.IsIndexType(desc.MediaType) || - desc.MediaType == images.MediaTypeDockerSchema1Manifest { + if images.IsManifestType(desc.MediaType) || images.IsIndexType(desc.MediaType) { var firstErr error - for _, host := range r.hosts { + for i, host := range r.hosts { req := r.request(host, http.MethodGet, "manifests", desc.Digest.String()) if err := req.addNamespace(r.refspec.Hostname()); err != nil { return nil, err } - rc, err := r.open(ctx, req, desc.MediaType, offset) + rc, err := r.open(ctx, req, desc.MediaType, offset, i == len(r.hosts)-1) if err != nil { // Store the error for referencing later if firstErr == nil { @@ -124,13 +299,13 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R // Finally use blobs endpoints var firstErr error - for _, host := range r.hosts { + for i, host := range r.hosts { req := r.request(host, http.MethodGet, "blobs", desc.Digest.String()) if err := req.addNamespace(r.refspec.Hostname()); err != nil { return nil, err } - rc, err := r.open(ctx, req, desc.MediaType, offset) + rc, err := r.open(ctx, req, desc.MediaType, offset, i == len(r.hosts)-1) if err != nil { // Store the error for referencing later if firstErr == nil { @@ -153,7 +328,7 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R }) } -func (r dockerFetcher) createGetReq(ctx context.Context, host RegistryHost, mediatype string, ps ...string) (*request, int64, error) { +func (r dockerFetcher) createGetReq(ctx context.Context, host RegistryHost, lastHost bool, mediatype string, ps ...string) (*request, int64, error) { headReq := r.request(host, http.MethodHead, ps...) 
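// Illustrative sketch (not part of the vendored change) of how the buffered
// pipe above is meant to be used: unlike io.Pipe, Write never blocks waiting
// for a reader, so a producer can run ahead while the consumer drains at its
// own pace. The 1 MiB buffer capacity is an arbitrary example value.
//
//	bufPool := newbufferPool(1 << 20)    // pool of reusable buffers
//	pr, pw := newPipeWriter(bufPool)     // connected read/write halves of one pipe
//	go func() {
//	    pw.Write([]byte("first chunk"))  // buffered immediately, no reader needed yet
//	    pw.Write([]byte("second chunk"))
//	    pw.Close()                       // reader sees io.EOF once the buffer drains
//	}()
//	data, err := io.ReadAll(pr)          // blocks only while the buffer is empty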
if err := headReq.addNamespace(r.refspec.Hostname()); err != nil { return nil, 0, err @@ -165,7 +340,7 @@ func (r dockerFetcher) createGetReq(ctx context.Context, host RegistryHost, medi headReq.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", ")) } - headResp, err := headReq.doWithRetries(ctx, nil) + headResp, err := headReq.doWithRetries(ctx, lastHost) if err != nil { return nil, 0, err } @@ -209,8 +384,8 @@ func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest, op firstErr error ) - for _, host := range r.hosts { - getReq, sz, err = r.createGetReq(ctx, host, config.Mediatype, "blobs", dgst.String()) + for i, host := range r.hosts { + getReq, sz, err = r.createGetReq(ctx, host, i == len(r.hosts)-1, config.Mediatype, "blobs", dgst.String()) if err == nil { break } @@ -222,8 +397,8 @@ func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest, op if getReq == nil { // Fall back to the "manifests" endpoint - for _, host := range r.hosts { - getReq, sz, err = r.createGetReq(ctx, host, config.Mediatype, "manifests", dgst.String()) + for i, host := range r.hosts { + getReq, sz, err = r.createGetReq(ctx, host, i == len(r.hosts)-1, config.Mediatype, "manifests", dgst.String()) if err == nil { break } @@ -245,7 +420,7 @@ func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest, op } seeker, err := newHTTPReadSeeker(sz, func(offset int64) (io.ReadCloser, error) { - return r.open(ctx, getReq, config.Mediatype, offset) + return r.open(ctx, getReq, config.Mediatype, offset, true) }) if err != nil { return nil, desc, err @@ -262,80 +437,148 @@ func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest, op return seeker, desc, nil } -func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (_ io.ReadCloser, retErr error) { - if mediatype == "" { - req.header.Set("Accept", "*/*") - } else { - req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", ")) +func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64, lastHost bool) (_ io.ReadCloser, retErr error) { + const minChunkSize = 512 + + chunkSize := int64(r.performances.ConcurrentLayerFetchBuffer) + parallelism := int64(r.performances.MaxConcurrentDownloads) + if chunkSize < minChunkSize || req.body != nil { + parallelism = 1 } + log.G(ctx).WithField("initial_parallelism", r.performances.MaxConcurrentDownloads). + WithField("parallelism", parallelism). + WithField("chunk_size", chunkSize). + WithField("offset", offset). + Debug("fetching layer") + req.setMediaType(mediatype) req.header.Set("Accept-Encoding", "zstd;q=1.0, gzip;q=0.8, deflate;q=0.5") - - if offset > 0 { - // Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints - // will return the header without supporting the range. The content - // range must always be checked. - req.header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + if parallelism > 1 || offset > 0 { + req.setOffset(offset) } - resp, err := req.doWithRetries(ctx, nil) - if err != nil { + if err := r.Acquire(ctx, 1); err != nil { return nil, err } - defer func() { - if retErr != nil { - resp.Body.Close() - } - }() - - if resp.StatusCode > 299 { - // TODO(stevvooe): When doing a offset specific request, we should - // really distinguish between a 206 and a 200. In the case of 200, we - // can discard the bytes, hiding the seek behavior from the - // implementation. 
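// Not part of the vendored change: a note on the preconditions in the
// rewritten open() above. Chunked parallel fetching is only attempted when the
// configured ConcurrentLayerFetchBuffer is at least minChunkSize (512 bytes)
// and the request carries no body; otherwise parallelism is forced back to 1
// and the fetch behaves like the previous single-request path. When a
// DownloadLimiter is configured, r.Acquire / r.Release bound how many of these
// requests are in flight at once.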
- - if resp.StatusCode == http.StatusNotFound { - return nil, fmt.Errorf("content at %v not found: %w", req.String(), errdefs.ErrNotFound) - } - var registryErr Errors - if err := json.NewDecoder(resp.Body).Decode(®istryErr); err != nil || registryErr.Len() < 1 { - return nil, fmt.Errorf("unexpected status code %v: %v", req.String(), resp.Status) - } - return nil, fmt.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error()) - } - if offset > 0 { - cr := resp.Header.Get("content-range") - if cr != "" { - if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) { - return nil, fmt.Errorf("unhandled content range in response: %v", cr) - - } - } else { - // TODO: Should any cases where use of content range - // without the proper header be considered? - // 206 responses? - - // Discard up to offset - // Could use buffer pool here but this case should be rare - n, err := io.Copy(io.Discard, io.LimitReader(resp.Body, offset)) - if err != nil { - return nil, fmt.Errorf("failed to discard to offset: %w", err) - } - if n != offset { - return nil, errors.New("unable to discard to offset") - } - + resp, err := req.doWithRetries(ctx, lastHost, withErrorCheck, withOffsetCheck(offset)) + switch err { + case nil: + // all good + case errContentRangeIgnored: + if parallelism != 1 { + log.G(ctx).WithError(err).Info("remote host ignored content range, forcing parallelism to 1") + parallelism = 1 } + default: + log.G(ctx).WithError(err).Debug("fetch failed") + r.Release(1) + return nil, err } body := resp.Body encoding := strings.FieldsFunc(resp.Header.Get("Content-Encoding"), func(r rune) bool { return r == ' ' || r == '\t' || r == ',' }) + + remaining, _ := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 0) + if remaining <= chunkSize { + parallelism = 1 + } + + if parallelism > 1 { + // If we have a content length, we can use multiple requests to fetch + // the content in parallel. This will make download of bigger bodies + // faster, at the cost of parallelism more requests and max + // ~(max_parallelism * goroutine footprint) memory usage. The goroutine + // footprint should be: the goroutine stack + pipe buffer size + numChunks := remaining / chunkSize + if numChunks*chunkSize < remaining { + numChunks++ + } + if numChunks < parallelism { + parallelism = numChunks + } + queue := make(chan int64, parallelism) + ctx, cancelCtx := context.WithCancel(ctx) + done := ctx.Done() + readers, writers := make([]io.Reader, numChunks), make([]*pipeWriter, numChunks) + bufPool := newbufferPool(chunkSize) + for i := range numChunks { + readers[i], writers[i] = newPipeWriter(bufPool) + } + go func() { + for i := range numChunks { + select { + case queue <- i: + case <-done: + return // avoid leaking a goroutine if we exit early. 
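// Worked example (not from the vendored change; the sizes are assumptions):
// for a 100 MiB blob fetched with ConcurrentLayerFetchBuffer = 8 MiB and
// MaxConcurrentDownloads = 4, the bookkeeping above works out to
//
//	remaining = 100 MiB, chunkSize = 8 MiB
//	numChunks = 100/8 = 12, and 12*8 MiB < 100 MiB, so numChunks = 13
//	parallelism = min(4, 13) = 4
//
// Chunk i is requested with a Range header starting at offset + i*chunkSize,
// copied through io.LimitReader(body, chunkSize) into its pipe writer, and the
// io.MultiReader over the ordered readers reassembles the blob sequentially.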
+ } + } + close(queue) + }() + r.Release(1) + for range parallelism { + go func() { + for i := range queue { // first in first out + copy := func() error { + if err := r.Acquire(ctx, 1); err != nil { + return err + } + defer r.Release(1) + var body io.ReadCloser + if i == 0 { + body = resp.Body + } else { + reqClone := req.clone() + reqClone.setOffset(offset + i*chunkSize) + nresp, err := reqClone.doWithRetries(ctx, lastHost, withErrorCheck) + if err != nil { + _ = writers[i].CloseWithError(err) + select { + case <-done: + return ctx.Err() + default: + cancelCtx() + } + return err + } + body = nresp.Body + } + _, err := io.Copy(writers[i], io.LimitReader(body, chunkSize)) + _ = body.Close() + _ = writers[i].CloseWithError(err) + if err != nil && err != io.EOF { + cancelCtx() + return err + } + return nil + } + if copy() != nil { + return + } + } + }() + } + body = &fnOnClose{ + BeforeClose: func() { + cancelCtx() + }, + ReadCloser: io.NopCloser(io.MultiReader(readers...)), + } + } else { + body = &fnOnClose{ + BeforeClose: func() { + r.Release(1) + }, + ReadCloser: body, + } + } for i := len(encoding) - 1; i >= 0; i-- { algorithm := strings.ToLower(encoding[i]) switch algorithm { case "zstd": - r, err := zstd.NewReader(body) + r, err := zstd.NewReader(body, + zstd.WithDecoderLowmem(false), + ) if err != nil { return nil, err } @@ -356,3 +599,17 @@ func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, return body, nil } + +type fnOnClose struct { + BeforeClose func() + io.ReadCloser +} + +// Close calls the BeforeClose function before closing the underlying +// ReadCloser. +func (f *fnOnClose) Close() error { + f.BeforeClose() + return f.ReadCloser.Close() +} + +var _ io.ReadCloser = &fnOnClose{} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher_fuzz.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher_fuzz.go deleted file mode 100644 index 5da9dca7a..000000000 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/fetcher_fuzz.go +++ /dev/null @@ -1,75 +0,0 @@ -//go:build gofuzz - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package docker - -import ( - "context" - "fmt" - "io" - "net/http" - "net/http/httptest" - "net/url" - "strconv" -) - -func FuzzFetcher(data []byte) int { - dataLen := len(data) - if dataLen == 0 { - return -1 - } - - s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { - rw.Header().Set("content-range", fmt.Sprintf("bytes %d-%d/%d", 0, dataLen-1, dataLen)) - rw.Header().Set("content-length", strconv.Itoa(dataLen)) - rw.Write(data) - })) - defer s.Close() - - u, err := url.Parse(s.URL) - if err != nil { - return 0 - } - - f := dockerFetcher{&dockerBase{ - repository: "nonempty", - }} - host := RegistryHost{ - Client: s.Client(), - Host: u.Host, - Scheme: u.Scheme, - Path: u.Path, - } - - ctx := context.Background() - req := f.request(host, http.MethodGet) - rc, err := f.open(ctx, req, "", 0) - if err != nil { - return 0 - } - b, err := io.ReadAll(rc) - if err != nil { - return 0 - } - - expected := data - if len(b) != len(expected) { - panic("len of request is not equal to len of expected but should be") - } - return 1 -} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/pusher.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/pusher.go index f994fff5a..956e83cbb 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/pusher.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/pusher.go @@ -28,14 +28,14 @@ import ( "sync" "time" - "github.com/containerd/containerd/v2/core/content" - "github.com/containerd/containerd/v2/core/images" - "github.com/containerd/containerd/v2/core/remotes" - remoteserrors "github.com/containerd/containerd/v2/core/remotes/errors" "github.com/containerd/errdefs" "github.com/containerd/log" digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/core/images" + "github.com/containerd/containerd/v2/core/remotes" ) type dockerPusher struct { @@ -115,7 +115,7 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str log.G(ctx).WithField("url", req.String()).Debugf("checking and pushing to") - resp, err := req.doWithRetries(ctx, nil) + resp, err := req.doWithRetries(ctx, true) if err != nil { if !errors.Is(err, ErrInvalidAuthorization) { return nil, err @@ -148,8 +148,8 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str return nil, fmt.Errorf("content %v on remote: %w", desc.Digest, errdefs.ErrAlreadyExists) } } else if resp.StatusCode != http.StatusNotFound { - err := remoteserrors.NewUnexpectedStatusErr(resp) - log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") + err := unexpectedResponseErr(resp) + log.G(ctx).WithError(err).Debug("unexpected response") resp.Body.Close() return nil, err } @@ -176,24 +176,28 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str // // for the private repo, we should remove mount-from // query and send the request again. 
- resp, err = preq.doWithRetries(pctx, nil) + resp, err = preq.doWithRetries(pctx, true) if err != nil { - return nil, err + if !errors.Is(err, ErrInvalidAuthorization) { + return nil, fmt.Errorf("pushing with mount from %s: %w", fromRepo, err) + } + log.G(ctx).Debugf("failed to push with mount from repository %s: %v", fromRepo, err) } + if resp != nil { + switch resp.StatusCode { + case http.StatusUnauthorized: + log.G(ctx).Debugf("failed to mount from repository %s, not authorized", fromRepo) - switch resp.StatusCode { - case http.StatusUnauthorized: - log.G(ctx).Debugf("failed to mount from repository %s", fromRepo) - - resp.Body.Close() - resp = nil - case http.StatusCreated: - mountedFrom = path.Join(p.refspec.Hostname(), fromRepo) + resp.Body.Close() + resp = nil + case http.StatusCreated: + mountedFrom = path.Join(p.refspec.Hostname(), fromRepo) + } } } if resp == nil { - resp, err = req.doWithRetries(ctx, nil) + resp, err = req.doWithRetries(ctx, true) if err != nil { if errors.Is(err, ErrInvalidAuthorization) { return nil, fmt.Errorf("push access denied, repository does not exist or may require authorization: %w", err) @@ -219,8 +223,8 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str }) return nil, fmt.Errorf("content %v on remote: %w", desc.Digest, errdefs.ErrAlreadyExists) default: - err := remoteserrors.NewUnexpectedStatusErr(resp) - log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") + err := unexpectedResponseErr(resp) + log.G(ctx).WithError(err).Debug("unexpected response") return nil, err } @@ -285,7 +289,7 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str req.size = desc.Size go func() { - resp, err := req.doWithRetries(ctx, nil) + resp, err := req.doWithRetries(ctx, true) if err != nil { pushw.setError(err) return @@ -294,8 +298,8 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str switch resp.StatusCode { case http.StatusOK, http.StatusCreated, http.StatusNoContent: default: - err := remoteserrors.NewUnexpectedStatusErr(resp) - log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") + err := unexpectedResponseErr(resp) + log.G(ctx).WithError(err).Debug("unexpected response") pushw.setError(err) return } @@ -477,13 +481,15 @@ func (pw *pushWriter) Digest() digest.Digest { func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { // Check whether read has already thrown an error - if _, err := pw.pipe.Write([]byte{}); err != nil && !errors.Is(err, io.ErrClosedPipe) { - return fmt.Errorf("pipe error before commit: %w", err) + if pw.pipe != nil { + if _, err := pw.pipe.Write([]byte{}); err != nil && !errors.Is(err, io.ErrClosedPipe) { + return fmt.Errorf("pipe error before commit: %w", err) + } + if err := pw.pipe.Close(); err != nil { + return err + } } - if err := pw.pipe.Close(); err != nil { - return err - } // TODO: timeout waiting for response var resp *http.Response select { @@ -506,7 +512,7 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di switch resp.StatusCode { case http.StatusOK, http.StatusCreated, http.StatusNoContent, http.StatusAccepted: default: - return remoteserrors.NewUnexpectedStatusErr(resp) + return unexpectedResponseErr(resp) } status, err := pw.tracker.GetStatus(pw.ref) diff --git 
a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/registry.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/registry.go index 98cafcd06..bbae768b1 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/registry.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/registry.go @@ -17,9 +17,11 @@ package docker import ( + "crypto/tls" "errors" "net" "net/http" + "time" ) // HostCapabilities represent the capabilities of the registry @@ -170,7 +172,9 @@ func ConfigureDefaultRegistries(ropts ...RegistryOpt) RegistryHosts { } if config.Client == nil { - config.Client = http.DefaultClient + config.Client = &http.Client{ + Transport: DefaultHTTPTransport(nil), + } } if opts.plainHTTP != nil { @@ -242,3 +246,19 @@ func MatchLocalhost(host string) (bool, error) { return ip.IsLoopback(), nil } + +func DefaultHTTPTransport(defaultTLSConfig *tls.Config) *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + FallbackDelay: 300 * time.Millisecond, + }).DialContext, + MaxIdleConns: 10, + IdleConnTimeout: 30 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: defaultTLSConfig, + ExpectContinueTimeout: 5 * time.Second, + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver.go index dce391c54..1c0f7c484 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/resolver.go @@ -34,11 +34,11 @@ import ( "github.com/containerd/log" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sync/semaphore" "github.com/containerd/containerd/v2/core/images" "github.com/containerd/containerd/v2/core/remotes" - "github.com/containerd/containerd/v2/core/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility. 
- remoteerrors "github.com/containerd/containerd/v2/core/remotes/errors" + "github.com/containerd/containerd/v2/core/transfer" "github.com/containerd/containerd/v2/pkg/reference" "github.com/containerd/containerd/v2/pkg/tracing" "github.com/containerd/containerd/v2/version" @@ -142,6 +142,7 @@ type dockerResolver struct { header http.Header resolveHeader http.Header tracker StatusTracker + config transfer.ImageResolverOptions } // NewResolver returns a new resolver to a Docker registry @@ -156,9 +157,6 @@ func NewResolver(options ResolverOptions) remotes.Resolver { // make a copy of the headers to avoid race due to concurrent map write options.Headers = options.Headers.Clone() } - if _, ok := options.Headers["User-Agent"]; !ok { - options.Headers.Set("User-Agent", "containerd/"+version.Version) - } resolveHeader := http.Header{} if _, ok := options.Headers["Accept"]; !ok { @@ -232,7 +230,7 @@ func (r *countingReader) Read(p []byte) (int, error) { return n, err } -var _ remotes.Resolver = &dockerResolver{} +var _ remotes.ResolverWithOptions = &dockerResolver{} func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) { base, err := r.resolveDockerBase(ref) @@ -307,7 +305,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp } log.G(ctx).Debug("resolving") - resp, err := req.doWithRetries(ctx, nil) + resp, err := req.doWithRetries(ctx, i == len(hosts)-1) if err != nil { if errors.Is(err, ErrInvalidAuthorization) { err = fmt.Errorf("pull access denied, repository does not exist or may require authorization: %w", err) @@ -332,13 +330,13 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp } if resp.StatusCode > 399 { if firstErrPriority < 3 { - firstErr = remoteerrors.NewUnexpectedStatusErr(resp) + firstErr = unexpectedResponseErr(resp) firstErrPriority = 3 } log.G(ctx).Infof("%s after status: %s", nextHostOrFail(i), resp.Status) continue // try another host } - return "", ocispec.Descriptor{}, remoteerrors.NewUnexpectedStatusErr(resp) + return "", ocispec.Descriptor{}, unexpectedResponseErr(resp) } size := resp.ContentLength contentType := getManifestMediaType(resp) @@ -371,7 +369,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp req.header[key] = append(req.header[key], value...) 
} - resp, err := req.doWithRetries(ctx, nil) + resp, err := req.doWithRetries(ctx, true) if err != nil { return "", ocispec.Descriptor{}, err } @@ -387,13 +385,8 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp } if contentType == images.MediaTypeDockerSchema1Manifest { - b, err := schema1.ReadStripSignature(&bodyReader) - if err != nil { - return err - } - - dgst = digest.FromBytes(b) - return nil + return fmt.Errorf("%w: media type %q is no longer supported since containerd v2.0, please rebuild the image as %q or %q", + errdefs.ErrNotImplemented, images.MediaTypeDockerSchema1Manifest, images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest) } dgst, err = digest.FromReader(&bodyReader) @@ -433,6 +426,12 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp return "", ocispec.Descriptor{}, firstErr } +func (r *dockerResolver) SetOptions(options ...transfer.ImageResolverOption) { + for _, opt := range options { + opt(&r.config) + } +} + func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { base, err := r.resolveDockerBase(ref) if err != nil { @@ -467,10 +466,25 @@ func (r *dockerResolver) resolveDockerBase(ref string) (*dockerBase, error) { } type dockerBase struct { - refspec reference.Spec - repository string - hosts []RegistryHost - header http.Header + refspec reference.Spec + repository string + hosts []RegistryHost + header http.Header + performances transfer.ImageResolverPerformanceSettings + limiter *semaphore.Weighted +} + +func (r *dockerBase) Acquire(ctx context.Context, weight int64) error { + if r.limiter == nil { + return nil + } + return r.limiter.Acquire(ctx, weight) +} + +func (r *dockerBase) Release(weight int64) { + if r.limiter != nil { + r.limiter.Release(weight) + } } func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) { @@ -480,10 +494,12 @@ func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) { return nil, err } return &dockerBase{ - refspec: refspec, - repository: strings.TrimPrefix(refspec.Locator, host+"/"), - hosts: hosts, - header: r.header, + refspec: refspec, + repository: strings.TrimPrefix(refspec.Locator, host+"/"), + hosts: hosts, + header: r.header, + performances: r.config.Performances, + limiter: r.config.DownloadLimiter, }, nil } @@ -505,6 +521,11 @@ func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *re for key, value := range host.Header { header[key] = append(header[key], value...) } + + if len(header.Get("User-Agent")) == 0 { + header.Set("User-Agent", "containerd/"+version.Version) + } + parts := append([]string{"/", host.Path, r.repository}, ps...) p := path.Join(parts...) 
// Join strips trailing slash, re-add ending "/" if included @@ -562,6 +583,12 @@ type request struct { size int64 } +func (r *request) clone() *request { + res := *r + res.header = r.header.Clone() + return &res +} + func (r *request) do(ctx context.Context) (*http.Response, error) { u := r.host.Scheme + "://" + r.host.Host + r.path req, err := http.NewRequestWithContext(ctx, r.method, u, nil) @@ -617,26 +644,91 @@ func (r *request) do(ctx context.Context) (*http.Response, error) { return resp, nil } -func (r *request) doWithRetries(ctx context.Context, responses []*http.Response) (*http.Response, error) { +type doChecks func(r *request, resp *http.Response) error + +func withErrorCheck(r *request, resp *http.Response) error { + if resp.StatusCode > 299 { + if resp.StatusCode == http.StatusNotFound { + return fmt.Errorf("content at %v not found: %w", r.String(), errdefs.ErrNotFound) + } + + return unexpectedResponseErr(resp) + } + return nil +} + +var errContentRangeIgnored = errors.New("content range requests ignored") + +func withOffsetCheck(offset int64) doChecks { + return func(r *request, resp *http.Response) error { + if offset == 0 { + return nil + } + if resp.StatusCode == http.StatusPartialContent { + return nil + } + if cr := resp.Header.Get("Content-Range"); cr != "" { + if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) { + return fmt.Errorf("unhandled content range in response: %v", cr) + } + return nil + } + + // Discard up to offset + // Could use buffer pool here but this case should be rare + n, err := io.Copy(io.Discard, io.LimitReader(resp.Body, offset)) + if err != nil { + return fmt.Errorf("failed to discard to offset: %w", err) + } + if n != offset { + return errors.New("unable to discard to offset") + } + + // content range ignored, we can't do concurrent fetches here. 
+ // return an error to be caught + return errContentRangeIgnored + } +} + +func (r *request) doWithRetries(ctx context.Context, lastHost bool, checks ...doChecks) (resp *http.Response, err error) { + resp, err = r.doWithRetriesInner(ctx, nil, lastHost) + if err != nil { + return nil, err + } + defer func() { + if err != nil && err != errContentRangeIgnored { + resp.Body.Close() + } + }() + for _, check := range checks { + if err := check(r, resp); err != nil { + return resp, err + } + } + + return resp, nil +} + +func (r *request) doWithRetriesInner(ctx context.Context, responses []*http.Response, lastHost bool) (*http.Response, error) { resp, err := r.do(ctx) if err != nil { return nil, err } responses = append(responses, resp) - retry, err := r.retryRequest(ctx, responses) + retry, err := r.retryRequest(ctx, responses, lastHost) if err != nil { resp.Body.Close() return nil, err } if retry { resp.Body.Close() - return r.doWithRetries(ctx, responses) + return r.doWithRetriesInner(ctx, responses, lastHost) } return resp, err } -func (r *request) retryRequest(ctx context.Context, responses []*http.Response) (bool, error) { +func (r *request) retryRequest(ctx context.Context, responses []*http.Response, lastHost bool) (bool, error) { if len(responses) > 5 { return false, nil } @@ -662,9 +754,17 @@ func (r *request) retryRequest(ctx context.Context, responses []*http.Response) } case http.StatusRequestTimeout, http.StatusTooManyRequests: return true, nil + case http.StatusServiceUnavailable, http.StatusGatewayTimeout, http.StatusInternalServerError: + // Do not retry if the same error was seen in the last request + if len(responses) > 1 && responses[len(responses)-2].StatusCode == last.StatusCode { + return false, nil + } + // Only retry if this is the last host that will be attempted + if lastHost { + return true, nil + } } - // TODO: Handle 50x errors accounting for attempt history return false, nil } @@ -672,6 +772,18 @@ func (r *request) String() string { return r.host.Scheme + "://" + r.host.Host + r.path } +func (r *request) setMediaType(mediatype string) { + if mediatype == "" { + r.header.Set("Accept", "*/*") + } else { + r.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", ")) + } +} + +func (r *request) setOffset(offset int64) { + r.header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) +} + func requestFields(req *http.Request) log.Fields { fields := map[string]interface{}{ "request.method": req.Method, diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/schema1/converter.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/schema1/converter.go deleted file mode 100644 index e724e4e55..000000000 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/schema1/converter.go +++ /dev/null @@ -1,626 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -// Package schema1 provides a converter to fetch an image formatted in Docker Image Manifest v2, Schema 1. -// -// Deprecated: use images formatted in Docker Image Manifest v2, Schema 2, or OCI Image Spec v1. -package schema1 - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "strconv" - "strings" - "sync" - "time" - - "github.com/containerd/containerd/v2/core/content" - "github.com/containerd/containerd/v2/core/images" - "github.com/containerd/containerd/v2/core/remotes" - "github.com/containerd/containerd/v2/pkg/archive/compression" - "github.com/containerd/containerd/v2/pkg/deprecation" - "github.com/containerd/containerd/v2/pkg/labels" - "github.com/containerd/errdefs" - "github.com/containerd/log" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "golang.org/x/sync/errgroup" -) - -const ( - manifestSizeLimit = 8e6 // 8MB - labelDockerSchema1EmptyLayer = "containerd.io/docker.schema1.empty-layer" -) - -type blobState struct { - diffID digest.Digest - empty bool -} - -// Converter converts schema1 manifests to schema2 on fetch -type Converter struct { - contentStore content.Store - fetcher remotes.Fetcher - - pulledManifest *manifest - - mu sync.Mutex - blobMap map[digest.Digest]blobState - layerBlobs map[digest.Digest]ocispec.Descriptor -} - -var ErrDisabled = fmt.Errorf("Pulling Schema 1 images have been deprecated and disabled by default since containerd v2.0. "+ - "As a workaround you may set an environment variable `%s=1`, but this will be completely removed in containerd v2.1.", - deprecation.EnvPullSchema1Image) - -// NewConverter returns a new converter -func NewConverter(contentStore content.Store, fetcher remotes.Fetcher) (*Converter, error) { - s := os.Getenv(deprecation.EnvPullSchema1Image) - if s == "" { - return nil, ErrDisabled - } - enable, err := strconv.ParseBool(s) - if err != nil { - return nil, fmt.Errorf("failed to parse `%s=%s`: %w", deprecation.EnvPullSchema1Image, s, err) - } - if !enable { - return nil, ErrDisabled - } - log.L.Warn(ErrDisabled) - return &Converter{ - contentStore: contentStore, - fetcher: fetcher, - blobMap: map[digest.Digest]blobState{}, - layerBlobs: map[digest.Digest]ocispec.Descriptor{}, - }, nil -} - -// Handle fetching descriptors for a docker media type -func (c *Converter) Handle(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - switch desc.MediaType { - case images.MediaTypeDockerSchema1Manifest: - if err := c.fetchManifest(ctx, desc); err != nil { - return nil, err - } - - m := c.pulledManifest - if len(m.FSLayers) != len(m.History) { - return nil, errors.New("invalid schema 1 manifest, history and layer mismatch") - } - descs := make([]ocispec.Descriptor, 0, len(c.pulledManifest.FSLayers)) - - for i := range m.FSLayers { - if _, ok := c.blobMap[c.pulledManifest.FSLayers[i].BlobSum]; !ok { - empty, err := isEmptyLayer([]byte(m.History[i].V1Compatibility)) - if err != nil { - return nil, err - } - - // Do no attempt to download a known empty blob - if !empty { - descs = append([]ocispec.Descriptor{ - { - MediaType: images.MediaTypeDockerSchema2LayerGzip, - Digest: c.pulledManifest.FSLayers[i].BlobSum, - Size: -1, - }, - }, descs...) 
- } - c.blobMap[c.pulledManifest.FSLayers[i].BlobSum] = blobState{ - empty: empty, - } - } - } - return descs, nil - case images.MediaTypeDockerSchema2LayerGzip: - if c.pulledManifest == nil { - return nil, errors.New("manifest required for schema 1 blob pull") - } - return nil, c.fetchBlob(ctx, desc) - default: - return nil, fmt.Errorf("%v not support for schema 1 manifests", desc.MediaType) - } -} - -// ConvertOptions provides options on converting a docker schema1 manifest. -type ConvertOptions struct { - // ManifestMediaType specifies the media type of the manifest OCI descriptor. - ManifestMediaType string - - // ConfigMediaType specifies the media type of the manifest config OCI - // descriptor. - ConfigMediaType string -} - -// ConvertOpt allows configuring a convert operation. -type ConvertOpt func(context.Context, *ConvertOptions) error - -// UseDockerSchema2 is used to indicate that a schema1 manifest should be -// converted into the media types for a docker schema2 manifest. -func UseDockerSchema2() ConvertOpt { - return func(ctx context.Context, o *ConvertOptions) error { - o.ManifestMediaType = images.MediaTypeDockerSchema2Manifest - o.ConfigMediaType = images.MediaTypeDockerSchema2Config - return nil - } -} - -// Convert a docker manifest to an OCI descriptor -func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.Descriptor, error) { - co := ConvertOptions{ - ManifestMediaType: ocispec.MediaTypeImageManifest, - ConfigMediaType: ocispec.MediaTypeImageConfig, - } - for _, opt := range opts { - if err := opt(ctx, &co); err != nil { - return ocispec.Descriptor{}, err - } - } - - history, diffIDs, err := c.schema1ManifestHistory() - if err != nil { - return ocispec.Descriptor{}, fmt.Errorf("schema 1 conversion failed: %w", err) - } - - var img ocispec.Image - if err := json.Unmarshal([]byte(c.pulledManifest.History[0].V1Compatibility), &img); err != nil { - return ocispec.Descriptor{}, fmt.Errorf("failed to unmarshal image from schema 1 history: %w", err) - } - - img.History = history - img.RootFS = ocispec.RootFS{ - Type: "layers", - DiffIDs: diffIDs, - } - - b, err := json.MarshalIndent(img, "", " ") - if err != nil { - return ocispec.Descriptor{}, fmt.Errorf("failed to marshal image: %w", err) - } - - config := ocispec.Descriptor{ - MediaType: co.ConfigMediaType, - Digest: digest.Canonical.FromBytes(b), - Size: int64(len(b)), - } - - layers := make([]ocispec.Descriptor, len(diffIDs)) - for i, diffID := range diffIDs { - layers[i] = c.layerBlobs[diffID] - } - - manifest := ocispec.Manifest{ - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, - Config: config, - Layers: layers, - } - - mb, err := json.MarshalIndent(manifest, "", " ") - if err != nil { - return ocispec.Descriptor{}, fmt.Errorf("failed to marshal image: %w", err) - } - - desc := ocispec.Descriptor{ - MediaType: co.ManifestMediaType, - Digest: digest.Canonical.FromBytes(mb), - Size: int64(len(mb)), - } - - labels := map[string]string{} - labels["containerd.io/gc.ref.content.0"] = manifest.Config.Digest.String() - for i, ch := range manifest.Layers { - labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = ch.Digest.String() - } - - ref := remotes.MakeRefKey(ctx, desc) - if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(mb), desc, content.WithLabels(labels)); err != nil { - return ocispec.Descriptor{}, fmt.Errorf("failed to write image manifest: %w", err) - } - - ref = remotes.MakeRefKey(ctx, config) - if err := content.WriteBlob(ctx, c.contentStore, ref, 
bytes.NewReader(b), config); err != nil { - return ocispec.Descriptor{}, fmt.Errorf("failed to write image config: %w", err) - } - - return desc, nil -} - -// ReadStripSignature reads in a schema1 manifest and returns a byte array -// with the "signatures" field stripped -func ReadStripSignature(schema1Blob io.Reader) ([]byte, error) { - b, err := io.ReadAll(io.LimitReader(schema1Blob, manifestSizeLimit)) // limit to 8MB - if err != nil { - return nil, err - } - - return stripSignature(b) -} - -func (c *Converter) fetchManifest(ctx context.Context, desc ocispec.Descriptor) error { - log.G(ctx).Debug("fetch schema 1") - - rc, err := c.fetcher.Fetch(ctx, desc) - if err != nil { - return err - } - - b, err := ReadStripSignature(rc) - rc.Close() - if err != nil { - return err - } - - var m manifest - if err := json.Unmarshal(b, &m); err != nil { - return err - } - if len(m.Manifests) != 0 || len(m.Layers) != 0 { - return errors.New("converter: expected schema1 document but found extra keys") - } - c.pulledManifest = &m - - return nil -} - -func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) error { - log.G(ctx).Debug("fetch blob") - - var ( - ref = remotes.MakeRefKey(ctx, desc) - calc = newBlobStateCalculator() - compressMethod = compression.Gzip - ) - - // size may be unknown, set to zero for content ingest - ingestDesc := desc - if ingestDesc.Size == -1 { - ingestDesc.Size = 0 - } - - cw, err := content.OpenWriter(ctx, c.contentStore, content.WithRef(ref), content.WithDescriptor(ingestDesc)) - if err != nil { - if !errdefs.IsAlreadyExists(err) { - return err - } - - reuse, err := c.reuseLabelBlobState(ctx, desc) - if err != nil { - return err - } - - if reuse { - return nil - } - - ra, err := c.contentStore.ReaderAt(ctx, desc) - if err != nil { - return err - } - defer ra.Close() - - r, err := compression.DecompressStream(content.NewReader(ra)) - if err != nil { - return err - } - - compressMethod = r.GetCompression() - _, err = io.Copy(calc, r) - r.Close() - if err != nil { - return err - } - } else { - defer cw.Close() - - rc, err := c.fetcher.Fetch(ctx, desc) - if err != nil { - return err - } - defer rc.Close() - - eg, _ := errgroup.WithContext(ctx) - pr, pw := io.Pipe() - - eg.Go(func() error { - r, err := compression.DecompressStream(pr) - if err != nil { - return err - } - - compressMethod = r.GetCompression() - _, err = io.Copy(calc, r) - r.Close() - pr.CloseWithError(err) - return err - }) - - eg.Go(func() error { - defer pw.Close() - - return content.Copy(ctx, cw, io.TeeReader(rc, pw), ingestDesc.Size, ingestDesc.Digest) - }) - - if err := eg.Wait(); err != nil { - return err - } - } - - if desc.Size == -1 { - info, err := c.contentStore.Info(ctx, desc.Digest) - if err != nil { - return fmt.Errorf("failed to get blob info: %w", err) - } - desc.Size = info.Size - } - - if compressMethod == compression.Uncompressed { - log.G(ctx).WithField("id", desc.Digest).Debugf("changed media type for uncompressed schema1 layer blob") - desc.MediaType = images.MediaTypeDockerSchema2Layer - } - - state := calc.State() - - cinfo := content.Info{ - Digest: desc.Digest, - Labels: map[string]string{ - labels.LabelUncompressed: state.diffID.String(), - labelDockerSchema1EmptyLayer: strconv.FormatBool(state.empty), - }, - } - - if _, err := c.contentStore.Update(ctx, cinfo, "labels."+labels.LabelUncompressed, fmt.Sprintf("labels.%s", labelDockerSchema1EmptyLayer)); err != nil { - return fmt.Errorf("failed to update uncompressed label: %w", err) - } - - c.mu.Lock() - 
c.blobMap[desc.Digest] = state - c.layerBlobs[state.diffID] = desc - c.mu.Unlock() - - return nil -} - -func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descriptor) (bool, error) { - cinfo, err := c.contentStore.Info(ctx, desc.Digest) - if err != nil { - return false, fmt.Errorf("failed to get blob info: %w", err) - } - desc.Size = cinfo.Size - - diffID, ok := cinfo.Labels[labels.LabelUncompressed] - if !ok { - return false, nil - } - - emptyVal, ok := cinfo.Labels[labelDockerSchema1EmptyLayer] - if !ok { - return false, nil - } - - isEmpty, err := strconv.ParseBool(emptyVal) - if err != nil { - log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse bool from label %s: %v", labelDockerSchema1EmptyLayer, isEmpty) - return false, nil - } - - bState := blobState{empty: isEmpty} - - if bState.diffID, err = digest.Parse(diffID); err != nil { - log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse digest from label %s: %v", labels.LabelUncompressed, diffID) - return false, nil - } - - // NOTE: there is no need to read header to get compression method - // because there are only two kinds of methods. - if bState.diffID == desc.Digest { - desc.MediaType = images.MediaTypeDockerSchema2Layer - } else { - desc.MediaType = images.MediaTypeDockerSchema2LayerGzip - } - - c.mu.Lock() - c.blobMap[desc.Digest] = bState - c.layerBlobs[bState.diffID] = desc - c.mu.Unlock() - return true, nil -} - -func (c *Converter) schema1ManifestHistory() ([]ocispec.History, []digest.Digest, error) { - if c.pulledManifest == nil { - return nil, nil, errors.New("missing schema 1 manifest for conversion") - } - m := *c.pulledManifest - - if len(m.History) == 0 { - return nil, nil, errors.New("no history") - } - - history := make([]ocispec.History, len(m.History)) - diffIDs := []digest.Digest{} - for i := range m.History { - var h v1History - if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil { - return nil, nil, fmt.Errorf("failed to unmarshal history: %w", err) - } - - blobSum := m.FSLayers[i].BlobSum - - state := c.blobMap[blobSum] - - history[len(history)-i-1] = ocispec.History{ - Author: h.Author, - Comment: h.Comment, - Created: &h.Created, - CreatedBy: strings.Join(h.ContainerConfig.Cmd, " "), - EmptyLayer: state.empty, - } - - if !state.empty { - diffIDs = append([]digest.Digest{state.diffID}, diffIDs...) - - } - } - - return history, diffIDs, nil -} - -type fsLayer struct { - BlobSum digest.Digest `json:"blobSum"` -} - -type history struct { - V1Compatibility string `json:"v1Compatibility"` -} - -type manifest struct { - FSLayers []fsLayer `json:"fsLayers"` - History []history `json:"history"` - Layers json.RawMessage `json:"layers,omitempty"` // OCI manifest - Manifests json.RawMessage `json:"manifests,omitempty"` // OCI index -} - -type v1History struct { - Author string `json:"author,omitempty"` - Created time.Time `json:"created"` - Comment string `json:"comment,omitempty"` - ThrowAway *bool `json:"throwaway,omitempty"` - Size *int `json:"Size,omitempty"` // used before ThrowAway field - ContainerConfig struct { - Cmd []string `json:"Cmd,omitempty"` - } `json:"container_config,omitempty"` -} - -// isEmptyLayer returns whether the v1 compatibility history describes an -// empty layer. A return value of true indicates the layer is empty, -// however false does not indicate non-empty. 
-func isEmptyLayer(compatHistory []byte) (bool, error) { - var h v1History - if err := json.Unmarshal(compatHistory, &h); err != nil { - return false, err - } - - if h.ThrowAway != nil { - return *h.ThrowAway, nil - } - if h.Size != nil { - return *h.Size == 0, nil - } - - // If no `Size` or `throwaway` field is given, then - // it cannot be determined whether the layer is empty - // from the history, return false - return false, nil -} - -type signature struct { - Signatures []jsParsedSignature `json:"signatures"` -} - -type jsParsedSignature struct { - Protected string `json:"protected"` -} - -type protectedBlock struct { - Length int `json:"formatLength"` - Tail string `json:"formatTail"` -} - -// joseBase64UrlDecode decodes the given string using the standard base64 url -// decoder but first adds the appropriate number of trailing '=' characters in -// accordance with the jose specification. -// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 -func joseBase64UrlDecode(s string) ([]byte, error) { - switch len(s) % 4 { - case 0: - case 2: - s += "==" - case 3: - s += "=" - default: - return nil, errors.New("illegal base64url string") - } - return base64.URLEncoding.DecodeString(s) -} - -func stripSignature(b []byte) ([]byte, error) { - var sig signature - if err := json.Unmarshal(b, &sig); err != nil { - return nil, err - } - if len(sig.Signatures) == 0 { - return nil, errors.New("no signatures") - } - pb, err := joseBase64UrlDecode(sig.Signatures[0].Protected) - if err != nil { - return nil, fmt.Errorf("could not decode %s: %w", sig.Signatures[0].Protected, err) - } - - var protected protectedBlock - if err := json.Unmarshal(pb, &protected); err != nil { - return nil, err - } - - if protected.Length > len(b) { - return nil, errors.New("invalid protected length block") - } - - tail, err := joseBase64UrlDecode(protected.Tail) - if err != nil { - return nil, fmt.Errorf("invalid tail base 64 value: %w", err) - } - - return append(b[:protected.Length], tail...), nil -} - -type blobStateCalculator struct { - empty bool - digester digest.Digester -} - -func newBlobStateCalculator() *blobStateCalculator { - return &blobStateCalculator{ - empty: true, - digester: digest.Canonical.Digester(), - } -} - -func (c *blobStateCalculator) Write(p []byte) (int, error) { - if c.empty { - for _, b := range p { - if b != 0x00 { - c.empty = false - break - } - } - } - return c.digester.Hash().Write(p) -} - -func (c *blobStateCalculator) State() blobState { - return blobState{ - empty: c.empty, - diffID: c.digester.Digest(), - } -} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/scope.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/scope.go index 8135498bd..ed3831863 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/scope.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/docker/scope.go @@ -88,7 +88,7 @@ func GetTokenScopes(ctx context.Context, common []string) []string { l := 0 for idx := 1; idx < len(scopes); idx++ { - // Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/) + // Note: this comparison is unaware of the scope grammar (https://distribution.github.io/distribution/spec/auth/scope/) // So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal. 
if scopes[l] == scopes[idx] { continue diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/errors/errors.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/errors/errors.go index f60ff0fc2..d07d17408 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/errors/errors.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/errors/errors.go @@ -20,16 +20,23 @@ import ( "fmt" "io" "net/http" + + "github.com/containerd/typeurl/v2" ) var _ error = ErrUnexpectedStatus{} +func init() { + typeurl.Register(&ErrUnexpectedStatus{}, "github.com/containerd/containerd/v2/core/remotes/errors", "ErrUnexpectedStatus") +} + // ErrUnexpectedStatus is returned if a registry API request returned with unexpected HTTP status type ErrUnexpectedStatus struct { - Status string - StatusCode int - Body []byte - RequestURL, RequestMethod string + Status string `json:"status"` + StatusCode int `json:"statusCode"` + Body []byte `json:"body,omitempty"` + RequestURL string `json:"requestURL,omitempty"` + RequestMethod string `json:"requestMethod,omitempty"` } func (e ErrUnexpectedStatus) Error() string { diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/handlers.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/handlers.go index 16fcdbf84..a3e1ff984 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/handlers.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/handlers.go @@ -80,6 +80,8 @@ func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string { return "layer-" + key case images.IsKnownConfig(desc.MediaType): return "config-" + key + case images.IsAttestationType(desc.MediaType): + return "attestation-" + key default: log.G(ctx).Warnf("reference for unknown type: %s", desc.MediaType) return "unknown-" + key diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/resolver.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/resolver.go index c39b93785..66a2acdb4 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/resolver.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/remotes/resolver.go @@ -21,6 +21,7 @@ import ( "io" "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/core/transfer" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -51,6 +52,12 @@ type Resolver interface { Pusher(ctx context.Context, ref string) (Pusher, error) } +// ResolverWithOptions is a Resolver that also supports setting options. +type ResolverWithOptions interface { + Resolver + SetOptions(options ...transfer.ImageResolverOption) +} + // Fetcher fetches content. // A fetcher implementation may implement the FetcherByDigest interface too. type Fetcher interface { diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/transfer/transfer.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/transfer/transfer.go new file mode 100644 index 000000000..cd4a4963a --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/core/transfer/transfer.go @@ -0,0 +1,178 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package transfer + +import ( + "context" + "io" + + "golang.org/x/sync/semaphore" + + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + + "github.com/containerd/containerd/v2/core/content" + "github.com/containerd/containerd/v2/core/images" +) + +type Transferrer interface { + Transfer(ctx context.Context, source interface{}, destination interface{}, opts ...Opt) error +} + +type ImageResolver interface { + Resolve(ctx context.Context) (name string, desc ocispec.Descriptor, err error) +} + +type ImageResolverOptionSetter interface { + ImageResolver + SetResolverOptions(opts ...ImageResolverOption) +} + +type ImageResolverOption func(*ImageResolverOptions) + +type ImageResolverOptions struct { + DownloadLimiter *semaphore.Weighted + Performances ImageResolverPerformanceSettings +} + +type ImageResolverPerformanceSettings struct { + MaxConcurrentDownloads int + ConcurrentLayerFetchBuffer int +} + +func WithDownloadLimiter(limiter *semaphore.Weighted) ImageResolverOption { + return func(opts *ImageResolverOptions) { + opts.DownloadLimiter = limiter + } +} + +func WithMaxConcurrentDownloads(maxConcurrentDownloads int) ImageResolverOption { + return func(opts *ImageResolverOptions) { + opts.Performances.MaxConcurrentDownloads = maxConcurrentDownloads + } +} + +func WithConcurrentLayerFetchBuffer(ConcurrentLayerFetchBuffer int) ImageResolverOption { + return func(opts *ImageResolverOptions) { + opts.Performances.ConcurrentLayerFetchBuffer = ConcurrentLayerFetchBuffer + } +} + +type ImageFetcher interface { + ImageResolver + + Fetcher(ctx context.Context, ref string) (Fetcher, error) +} + +type ImagePusher interface { + Pusher(context.Context, ocispec.Descriptor) (Pusher, error) +} + +type Fetcher interface { + Fetch(context.Context, ocispec.Descriptor) (io.ReadCloser, error) +} + +type Pusher interface { + Push(context.Context, ocispec.Descriptor) (content.Writer, error) +} + +// ImageFilterer is used to filter out child objects of an image +type ImageFilterer interface { + ImageFilter(images.HandlerFunc, content.Store) images.HandlerFunc +} + +// ImageStorer is a type which is capable of storing images for +// the provided descriptor. The descriptor may be any type of manifest +// including an index with multiple image references. 
+type ImageStorer interface { + Store(context.Context, ocispec.Descriptor, images.Store) ([]images.Image, error) +} + +// ImageGetter is type which returns an image from an image store +type ImageGetter interface { + Get(context.Context, images.Store) (images.Image, error) +} + +// ImageLookup is a type which returns images from an image store +// based on names or prefixes +type ImageLookup interface { + Lookup(context.Context, images.Store) ([]images.Image, error) +} + +// ImageExporter exports images to a writer +type ImageExporter interface { + Export(context.Context, content.Store, []images.Image) error +} + +// ImageImporter imports an image into a content store +type ImageImporter interface { + Import(context.Context, content.Store) (ocispec.Descriptor, error) +} + +// ImageImportStreamer returns an import streamer based on OCI or +// Docker image tar archives. The stream should be a raw tar stream +// and without compression. +type ImageImportStreamer interface { + ImportStream(context.Context) (io.Reader, string, error) +} + +type ImageExportStreamer interface { + ExportStream(context.Context) (io.WriteCloser, string, error) +} + +type ImageUnpacker interface { + UnpackPlatforms() []UnpackConfiguration +} + +// ImagePlatformsGetter is type which returns configured platforms. +type ImagePlatformsGetter interface { + Platforms() []ocispec.Platform +} + +// UnpackConfiguration specifies the platform and snapshotter to use for resolving +// the unpack Platform, if snapshotter is not specified the platform default will +// be used. +type UnpackConfiguration struct { + Platform ocispec.Platform + Snapshotter string +} + +type ProgressFunc func(Progress) + +type Config struct { + Progress ProgressFunc +} + +type Opt func(*Config) + +func WithProgress(f ProgressFunc) Opt { + return func(opts *Config) { + opts.Progress = f + } +} + +// Progress is used to represent a particular progress event or incremental +// update for the provided named object. The parents represent the names of +// the objects which initiated the progress for the provided named object. +// The name and what object it represents is determined by the implementation. +type Progress struct { + Event string + Name string + Parents []string + Progress int64 + Total int64 + Desc *ocispec.Descriptor // since containerd v2.0 +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_darwin.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_darwin.go index 1391884cd..192abcdde 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_darwin.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_darwin.go @@ -17,21 +17,6 @@ package defaults const ( - // DefaultRootDir is the default location used by containerd to store - // persistent data - DefaultRootDir = "/var/lib/containerd" - // DefaultStateDir is the default location used by containerd to store - // transient data - DefaultStateDir = "/var/run/containerd" - // DefaultAddress is the default unix socket address - DefaultAddress = "/var/run/containerd/containerd.sock" - // DefaultDebugAddress is the default unix socket address for pprof data - DefaultDebugAddress = "/var/run/containerd/debug.sock" - // DefaultFIFODir is the default location used by client-side cio library - // to store FIFOs. 
- DefaultFIFODir = "/var/run/containerd/fifo" // DefaultRuntime would be a multiple of choices, thus empty DefaultRuntime = "" - // DefaultConfigDir is the default location for config files. - DefaultConfigDir = "/etc/containerd" ) diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_differ_windows.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_differ_windows.go deleted file mode 100644 index 0762d259e..000000000 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_differ_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package defaults - -const ( - // DefaultDiffer will set the default differ for the platform. - // This differ should be compatible with the windows snapshotter. - DefaultDiffer = "windows" -) diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_linux.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_linux.go index 7187c5a76..dc1ff0909 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_linux.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_linux.go @@ -17,6 +17,22 @@ package defaults const ( + // DefaultAddress is the default unix socket address + DefaultAddress = "/run/containerd/containerd.sock" + // DefaultDebugAddress is the default unix socket address for pprof data + DefaultDebugAddress = "/run/containerd/debug.sock" + // DefaultFIFODir is the default location used by client-side cio library + // to store FIFOs. + DefaultFIFODir = "/run/containerd/fifo" // DefaultRuntime is the default linux runtime DefaultRuntime = "io.containerd.runc.v2" + // DefaultSnapshotter will set the default snapshotter for the platform. + // This will be based on the client compilation target, so take that into + // account when choosing this value. + DefaultSnapshotter = "overlayfs" + // DefaultStateDir is the default location used by containerd to store + // transient data + DefaultStateDir = "/run/containerd" + // DefaultDiffer will set the default differ for the platform. + DefaultDiffer = "walking" ) diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_linux.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_linux.go deleted file mode 100644 index ade194717..000000000 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_linux.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package defaults - -const ( - // DefaultSnapshotter will set the default snapshotter for the platform. - // This will be based on the client compilation target, so take that into - // account when choosing this value. - DefaultSnapshotter = "overlayfs" -) diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_windows.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_windows.go deleted file mode 100644 index 37e5f7574..000000000 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_windows.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package defaults - -const ( - // DefaultSnapshotter will set the default snapshotter for the platform. - // This will be based on the client compilation target, so take that into - // account when choosing this value. - DefaultSnapshotter = "windows" -) diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_unix.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_unix.go index e68a522ca..41d00b943 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_unix.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_unix.go @@ -1,4 +1,4 @@ -//go:build !windows && !darwin +//go:build unix /* Copyright The containerd Authors. @@ -19,19 +19,9 @@ package defaults const ( + // DefaultConfigDir is the default location for config files. + DefaultConfigDir = "/etc/containerd" // DefaultRootDir is the default location used by containerd to store // persistent data DefaultRootDir = "/var/lib/containerd" - // DefaultStateDir is the default location used by containerd to store - // transient data - DefaultStateDir = "/run/containerd" - // DefaultAddress is the default unix socket address - DefaultAddress = "/run/containerd/containerd.sock" - // DefaultDebugAddress is the default unix socket address for pprof data - DefaultDebugAddress = "/run/containerd/debug.sock" - // DefaultFIFODir is the default location used by client-side cio library - // to store FIFOs. - DefaultFIFODir = "/run/containerd/fifo" - // DefaultConfigDir is the default location for config files. 
- DefaultConfigDir = "/etc/containerd" ) diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_unix.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_unix_nolinux.go similarity index 57% rename from src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_unix.go rename to src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_unix_nolinux.go index fc8b081ff..8e263eca6 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_unix.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_unix_nolinux.go @@ -1,4 +1,4 @@ -//go:build darwin || freebsd || solaris +//go:build unix && !linux /* Copyright The containerd Authors. @@ -19,8 +19,20 @@ package defaults const ( + // DefaultAddress is the default unix socket address + DefaultAddress = "/var/run/containerd/containerd.sock" + // DefaultDebugAddress is the default unix socket address for pprof data + DefaultDebugAddress = "/var/run/containerd/debug.sock" + // DefaultFIFODir is the default location used by client-side cio library + // to store FIFOs. + DefaultFIFODir = "/var/run/containerd/fifo" // DefaultSnapshotter will set the default snapshotter for the platform. // This will be based on the client compilation target, so take that into // account when choosing this value. DefaultSnapshotter = "native" + // DefaultStateDir is the default location used by containerd to store + // transient data + DefaultStateDir = "/var/run/containerd" + // DefaultDiffer will set the default differ for the platform. + DefaultDiffer = "walking" ) diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_windows.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_windows.go index 9f4bed8b0..9f01c8fb8 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_windows.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/defaults/defaults_windows.go @@ -38,9 +38,16 @@ const ( DefaultAddress = `\\.\pipe\containerd-containerd` // DefaultDebugAddress is the default winpipe address for pprof data DefaultDebugAddress = `\\.\pipe\containerd-debug` + // DefaultDiffer will set the default differ for the platform. + // This differ should be compatible with the windows snapshotter. + DefaultDiffer = "windows" // DefaultFIFODir is the default location used by client-side cio library // to store FIFOs. Unused on Windows. DefaultFIFODir = "" // DefaultRuntime is the default windows runtime DefaultRuntime = "io.containerd.runhcs.v1" + // DefaultSnapshotter will set the default snapshotter for the platform. + // This will be based on the client compilation target, so take that into + // account when choosing this value. + DefaultSnapshotter = "windows" ) diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/internal/lazyregexp/lazyregexp.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/internal/lazyregexp/lazyregexp.go new file mode 100644 index 000000000..08d890f31 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/internal/lazyregexp/lazyregexp.go @@ -0,0 +1,86 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code below was largely copied from golang.org/x/mod@v0.22; +// https://github.com/golang/mod/blob/v0.22.0/internal/lazyregexp/lazyre.go +// with some additional methods added. + +// Package lazyregexp is a thin wrapper over regexp, allowing the use of global +// regexp variables without forcing them to be compiled at init. +package lazyregexp + +import ( + "os" + "regexp" + "strings" + "sync" +) + +// Regexp is a wrapper around [regexp.Regexp], where the underlying regexp will be +// compiled the first time it is needed. +type Regexp struct { + str string + once sync.Once + rx *regexp.Regexp +} + +func (re *Regexp) re() *regexp.Regexp { + re.once.Do(re.build) + return re.rx +} + +func (re *Regexp) build() { + re.rx = regexp.MustCompile(re.str) + re.str = "" +} + +func (re *Regexp) FindStringIndex(s string) (loc []int) { + return re.re().FindStringIndex(s) +} + +func (re *Regexp) FindStringSubmatch(s string) []string { + return re.re().FindStringSubmatch(s) +} + +func (re *Regexp) MatchString(s string) bool { + return re.re().MatchString(s) +} + +func (re *Regexp) ReplaceAll(src, repl []byte) []byte { + return re.re().ReplaceAll(src, repl) +} + +func (re *Regexp) String() string { + return re.re().String() +} + +var inTest = len(os.Args) > 0 && strings.HasSuffix(strings.TrimSuffix(os.Args[0], ".exe"), ".test") + +// New creates a new lazy regexp, delaying the compiling work until it is first +// needed. If the code is being run as part of tests, the regexp compiling will +// happen immediately. +func New(str string) *Regexp { + lr := &Regexp{str: str} + if inTest { + // In tests, always compile the regexps early. + lr.re() + } + return lr +} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression.go index cba5e477c..de3cb07ad 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression.go @@ -220,7 +220,9 @@ func DecompressStream(archive io.Reader) (DecompressReadCloser, error) { }, }, nil case Zstd: - zstdReader, err := zstd.NewReader(buf) + zstdReader, err := zstd.NewReader(buf, + zstd.WithDecoderLowmem(false), + ) if err != nil { return nil, err } diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression_fuzzer.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression_fuzzer.go deleted file mode 100644 index 3516494ac..000000000 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/archive/compression/compression_fuzzer.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build gofuzz - -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package compression - -import ( - "bytes" -) - -func FuzzDecompressStream(data []byte) int { - _, _ = DecompressStream(bytes.NewReader(data)) - return 1 -} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/deprecation/deprecation.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/deprecation/deprecation.go deleted file mode 100644 index 603bf41f0..000000000 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/deprecation/deprecation.go +++ /dev/null @@ -1,72 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package deprecation - -type Warning string - -const ( - // Prefix is a standard prefix for all Warnings, used for filtering plugin Exports - Prefix = "io.containerd.deprecation/" - // PullSchema1Image is a warning for the use of schema 1 images - PullSchema1Image Warning = Prefix + "pull-schema-1-image" - // GoPluginLibrary is a warning for the use of dynamic library Go plugins - GoPluginLibrary Warning = Prefix + "go-plugin-library" - // CRIRegistryMirrors is a warning for the use of the `mirrors` property - CRIRegistryMirrors Warning = Prefix + "cri-registry-mirrors" - // CRIRegistryAuths is a warning for the use of the `auths` property - CRIRegistryAuths Warning = Prefix + "cri-registry-auths" - // CRIRegistryConfigs is a warning for the use of the `configs` property - CRIRegistryConfigs Warning = Prefix + "cri-registry-configs" - // OTLPTracingConfig is a warning for the use of the `otlp` property - TracingOTLPConfig Warning = Prefix + "tracing-processor-config" - // TracingServiceConfig is a warning for the use of the `tracing` property - TracingServiceConfig Warning = Prefix + "tracing-service-config" -) - -const ( - EnvPrefix = "CONTAINERD_ENABLE_DEPRECATED_" - EnvPullSchema1Image = EnvPrefix + "PULL_SCHEMA_1_IMAGE" -) - -var messages = map[Warning]string{ - PullSchema1Image: "Schema 1 images are deprecated since containerd v1.7, disabled in containerd v2.0, and will be removed in containerd v2.1. " + - `Since containerd v1.7.8, schema 1 images are identified by the "io.containerd.image/converted-docker-schema1" label.`, - GoPluginLibrary: "Dynamically-linked Go plugins as containerd runtimes are deprecated since containerd v2.0 and removed in containerd v2.1.", - CRIRegistryMirrors: "The `mirrors` property of `[plugins.\"io.containerd.grpc.v1.cri\".registry]` is deprecated since containerd v1.5 and will be removed in containerd v2.1." 
+ - "Use `config_path` instead.", - CRIRegistryAuths: "The `auths` property of `[plugins.\"io.containerd.grpc.v1.cri\".registry]` is deprecated since containerd v1.3 and will be removed in containerd v2.1." + - "Use `ImagePullSecrets` instead.", - CRIRegistryConfigs: "The `configs` property of `[plugins.\"io.containerd.grpc.v1.cri\".registry]` is deprecated since containerd v1.5 and will be removed in containerd v2.1." + - "Use `config_path` instead.", - - TracingOTLPConfig: "The `otlp` property of `[plugins.\"io.containerd.tracing.processor.v1\".otlp]` is deprecated since containerd v1.6 and will be removed in containerd v2.0." + - "Use OTLP environment variables instead: https://opentelemetry.io/docs/specs/otel/protocol/exporter/", - TracingServiceConfig: "The `tracing` property of `[plugins.\"io.containerd.internal.v1\".tracing]` is deprecated since containerd v1.6 and will be removed in containerd v2.0." + - "Use OTEL environment variables instead: https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/", -} - -// Valid checks whether a given Warning is valid -func Valid(id Warning) bool { - _, ok := messages[id] - return ok -} - -// Message returns the human-readable message for a given Warning -func Message(id Warning) (string, bool) { - msg, ok := messages[id] - return msg, ok -} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/identifiers/validate.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/identifiers/validate.go index 0acbf3fc4..c1c7a336a 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/identifiers/validate.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/identifiers/validate.go @@ -26,8 +26,8 @@ package identifiers import ( "fmt" - "regexp" + "github.com/containerd/containerd/v2/internal/lazyregexp" "github.com/containerd/errdefs" ) @@ -39,7 +39,7 @@ const ( var ( // identifierRe defines the pattern for valid identifiers. - identifierRe = regexp.MustCompile(reAnchor(alphanum + reGroup(separators+reGroup(alphanum)) + "*")) + identifierRe = lazyregexp.New(reAnchor(alphanum + reGroup(separators+reGroup(alphanum)) + "*")) ) // Validate returns nil if the string s is a valid identifier. diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/namespaces/ttrpc.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/namespaces/ttrpc.go index bcd2643cf..f9b24ad69 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/namespaces/ttrpc.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/namespaces/ttrpc.go @@ -27,20 +27,12 @@ const ( TTRPCHeader = "containerd-namespace-ttrpc" ) -func copyMetadata(src ttrpc.MD) ttrpc.MD { - md := ttrpc.MD{} - for k, v := range src { - md[k] = append(md[k], v...) 
- } - return md -} - func withTTRPCNamespaceHeader(ctx context.Context, namespace string) context.Context { md, ok := ttrpc.GetMetadata(ctx) if !ok { md = ttrpc.MD{} } else { - md = copyMetadata(md) + md = md.Clone() } md.Set(TTRPCHeader, namespace) return ttrpc.WithMetadata(ctx, md) diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/reference/reference.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/reference/reference.go index d983c4e13..55c7b6fb5 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/reference/reference.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/pkg/reference/reference.go @@ -20,9 +20,9 @@ import ( "errors" "net/url" "path" - "regexp" "strings" + "github.com/containerd/containerd/v2/internal/lazyregexp" digest "github.com/opencontainers/go-digest" ) @@ -80,7 +80,7 @@ type Spec struct { Object string } -var splitRe = regexp.MustCompile(`[:@]`) +var splitRe = lazyregexp.New(`[:@]`) // Parse parses the string into a structured ref. func Parse(s string) (Spec, error) { diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/plugins/content/local/content_local_fuzzer.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/plugins/content/local/content_local_fuzzer.go deleted file mode 100644 index 39ec84b37..000000000 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/plugins/content/local/content_local_fuzzer.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build gofuzz - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package local - -import ( - "bufio" - "bytes" - "context" - _ "crypto/sha256" - "io" - "testing" - - "github.com/opencontainers/go-digest" - - "github.com/containerd/containerd/v2/core/content" -) - -func FuzzContentStoreWriter(data []byte) int { - t := &testing.T{} - ctx := context.Background() - ctx, _, cs, cleanup := contentStoreEnv(t) - defer cleanup() - - cw, err := cs.Writer(ctx, content.WithRef("myref")) - if err != nil { - return 0 - } - if err := cw.Close(); err != nil { - return 0 - } - - // reopen, so we can test things - cw, err = cs.Writer(ctx, content.WithRef("myref")) - if err != nil { - return 0 - } - - err = checkCopyFuzz(int64(len(data)), cw, bufio.NewReader(io.NopCloser(bytes.NewReader(data)))) - if err != nil { - return 0 - } - expected := digest.FromBytes(data) - - if err = cw.Commit(ctx, int64(len(data)), expected); err != nil { - return 0 - } - return 1 -} - -func checkCopyFuzz(size int64, dst io.Writer, src io.Reader) error { - nn, err := io.Copy(dst, src) - if err != nil { - return err - } - - if nn != size { - return err - } - return nil -} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/plugins/content/local/test_helper.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/plugins/content/local/test_helper.go deleted file mode 100644 index a4c238c47..000000000 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/plugins/content/local/test_helper.go +++ /dev/null @@ -1,38 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "context" - "testing" - - "github.com/containerd/containerd/v2/core/content" -) - -func contentStoreEnv(t testing.TB) (context.Context, string, content.Store, func()) { - tmpdir := t.TempDir() - - cs, err := NewStore(tmpdir) - if err != nil { - t.Fatal(err) - } - - ctx, cancel := context.WithCancel(context.Background()) - return ctx, tmpdir, cs, func() { - cancel() - } -} diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/version/version.go b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/version/version.go index 28f46ed09..709cbe947 100644 --- a/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/version/version.go +++ b/src/cmd/linuxkit/vendor/github.com/containerd/containerd/v2/version/version.go @@ -19,11 +19,12 @@ package version import "runtime" var ( + Name = "containerd" // Package is filled at linking time Package = "github.com/containerd/containerd/v2" // Version holds the complete version number. Filled in at linking time. - Version = "2.0.2+unknown" + Version = "2.1.3+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. 
diff --git a/src/cmd/linuxkit/vendor/github.com/containerd/errdefs/pkg/errhttp/http.go b/src/cmd/linuxkit/vendor/github.com/containerd/errdefs/pkg/errhttp/http.go new file mode 100644 index 000000000..d7cd2b8c1 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/containerd/errdefs/pkg/errhttp/http.go @@ -0,0 +1,96 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package errhttp provides utility functions for translating errors to +// and from a HTTP context. +// +// The functions ToHTTP and ToNative can be used to map server-side and +// client-side errors to the correct types. +package errhttp + +import ( + "errors" + "net/http" + + "github.com/containerd/errdefs" + "github.com/containerd/errdefs/pkg/internal/cause" +) + +// ToHTTP returns the best status code for the given error +func ToHTTP(err error) int { + switch { + case errdefs.IsNotFound(err): + return http.StatusNotFound + case errdefs.IsInvalidArgument(err): + return http.StatusBadRequest + case errdefs.IsConflict(err): + return http.StatusConflict + case errdefs.IsNotModified(err): + return http.StatusNotModified + case errdefs.IsFailedPrecondition(err): + return http.StatusPreconditionFailed + case errdefs.IsUnauthorized(err): + return http.StatusUnauthorized + case errdefs.IsPermissionDenied(err): + return http.StatusForbidden + case errdefs.IsResourceExhausted(err): + return http.StatusTooManyRequests + case errdefs.IsInternal(err): + return http.StatusInternalServerError + case errdefs.IsNotImplemented(err): + return http.StatusNotImplemented + case errdefs.IsUnavailable(err): + return http.StatusServiceUnavailable + case errdefs.IsUnknown(err): + var unexpected cause.ErrUnexpectedStatus + if errors.As(err, &unexpected) && unexpected.Status >= 200 && unexpected.Status < 600 { + return unexpected.Status + } + return http.StatusInternalServerError + default: + return http.StatusInternalServerError + } +} + +// ToNative returns the error best matching the HTTP status code +func ToNative(statusCode int) error { + switch statusCode { + case http.StatusNotFound: + return errdefs.ErrNotFound + case http.StatusBadRequest: + return errdefs.ErrInvalidArgument + case http.StatusConflict: + return errdefs.ErrConflict + case http.StatusPreconditionFailed: + return errdefs.ErrFailedPrecondition + case http.StatusUnauthorized: + return errdefs.ErrUnauthenticated + case http.StatusForbidden: + return errdefs.ErrPermissionDenied + case http.StatusNotModified: + return errdefs.ErrNotModified + case http.StatusTooManyRequests: + return errdefs.ErrResourceExhausted + case http.StatusInternalServerError: + return errdefs.ErrInternal + case http.StatusNotImplemented: + return errdefs.ErrNotImplemented + case http.StatusServiceUnavailable: + return errdefs.ErrUnavailable + default: + return cause.ErrUnexpectedStatus{Status: statusCode} + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/AUTHORS b/src/cmd/linuxkit/vendor/github.com/docker/cli/AUTHORS index ad1abd496..c5a480b5e 100644 --- 
a/src/cmd/linuxkit/vendor/github.com/docker/cli/AUTHORS +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/AUTHORS @@ -48,6 +48,7 @@ Alfred Landrum Ali Rostami Alicia Lauerman Allen Sun +Allie Sadler Alvin Deng Amen Belayneh Amey Shrivastava <72866602+AmeyShrivastava@users.noreply.github.com> @@ -81,6 +82,7 @@ Antonis Kalipetis Anusha Ragunathan Ao Li Arash Deshmeh +Archimedes Trajano Arko Dasgupta Arnaud Porterie Arnaud Rebillout @@ -88,6 +90,7 @@ Arthur Peka Ashly Mathew Ashwini Oruganti Aslam Ahemad +Austin Vazquez Azat Khuyiyakhmetov Bardia Keyoumarsi Barnaby Gray @@ -132,6 +135,7 @@ Cao Weiwei Carlo Mion Carlos Alexandro Becker Carlos de Paula +Carston Schilds Casey Korver Ce Gao Cedric Davies @@ -189,6 +193,7 @@ Daisuke Ito dalanlan Damien Nadé Dan Cotora +Dan Wallis Danial Gharib Daniel Artine Daniel Cassidy @@ -237,6 +242,7 @@ Deshi Xiao Dharmit Shah Dhawal Yogesh Bhanushali Dieter Reuter +Dilep Dev <34891655+DilepDev@users.noreply.github.com> Dima Stopel Dimitry Andric Ding Fei @@ -308,6 +314,8 @@ George MacRorie George Margaritis George Xie Gianluca Borello +Giau. Tran Minh +Giedrius Jonikas Gildas Cuisinier Gio d'Amelio Gleb Stsenov @@ -344,6 +352,7 @@ Hugo Gabriel Eyherabide huqun Huu Nguyen Hyzhou Zhy +Iain MacDonald Iain Samuel McLean Elder Ian Campbell Ian Philpot @@ -393,6 +402,7 @@ Jesse Adametz Jessica Frazelle Jezeniel Zapanta Jian Zhang +Jianyong Wu Jie Luo Jilles Oldenbeuving Jim Chen @@ -446,6 +456,7 @@ Julian Julien Barbier Julien Kassar Julien Maitrehenry +Julio Cesar Garcia Justas Brazauskas Justin Chadwell Justin Cormack @@ -490,19 +501,22 @@ Kunal Kushwaha Kyle Mitofsky Lachlan Cooper Lai Jiangshan +Lajos Papp Lars Kellogg-Stedman Laura Brehm Laura Frank Laurent Erignoux +Laurent Goderre Lee Gaines Lei Jitang Lennie +lentil32 Leo Gallucci Leonid Skorospelov Lewis Daly Li Fu Bang Li Yi -Li Yi +Li Zeghong Liang-Chi Hsieh Lihua Tang Lily Guo @@ -515,6 +529,7 @@ lixiaobing10051267 Lloyd Dewolf Lorenzo Fontana Louis Opter +Lovekesh Kumar Luca Favatella Luca Marturana Lucas Chan @@ -559,6 +574,7 @@ Matt Robenolt Matteo Orefice Matthew Heon Matthieu Hauglustaine +Matthieu MOREL Mauro Porras P Max Shytikov Max-Julian Pogner @@ -566,6 +582,7 @@ Maxime Petazzoni Maximillian Fan Xavier Mei ChunTao Melroy van den Berg +Mert Şişmanoğlu Metal <2466052+tedhexaflow@users.noreply.github.com> Micah Zoltu Michael A. Smith @@ -598,7 +615,9 @@ Mindaugas Rukas Miroslav Gula Misty Stanley-Jones Mohammad Banikazemi +Mohammad Hossein Mohammed Aaqib Ansari +Mohammed Aminu Futa Mohini Anne Dsouza Moorthy RS Morgan Bauer @@ -633,9 +652,11 @@ Nicolas De Loof Nikhil Chawla Nikolas Garofil Nikolay Milovanov +NinaLua Nir Soffer Nishant Totla NIWA Hideyuki +Noah Silas Noah Treuhaft O.S. Tezer Oded Arbel @@ -653,10 +674,12 @@ Patrick Böänziger Patrick Daigle <114765035+pdaig@users.noreply.github.com> Patrick Hemmer Patrick Lang +Patrick St. laurent Paul Paul Kehrer Paul Lietar Paul Mulders +Paul Rogalski Paul Seyfert Paul Weaver Pavel Pospisil @@ -678,7 +701,6 @@ Philip Alexander Etling Philipp Gillé Philipp Schmied Phong Tran -pidster Pieter E Smit pixelistik Pratik Karki @@ -738,6 +760,7 @@ Samuel Cochran Samuel Karp Sandro Jäckel Santhosh Manohar +Sarah Sanders Sargun Dhillon Saswat Bhattacharya Saurabh Kumar @@ -770,6 +793,7 @@ Spencer Brown Spring Lee squeegels Srini Brahmaroutu +Stavros Panakakis Stefan S. 
Stefan Scherer Stefan Weil @@ -780,6 +804,7 @@ Steve Durrheimer Steve Richards Steven Burgess Stoica-Marcu Floris-Andrei +Stuart Williams Subhajit Ghosh Sun Jianbo Sune Keller @@ -867,6 +892,7 @@ Wang Yumu <37442693@qq.com> Wataru Ishida Wayne Song Wen Cheng Ma +Wenlong Zhang Wenzhi Liang Wes Morgan Wewang Xiaorenfine @@ -908,3 +934,4 @@ Zhuo Zhi Átila Camurça Alves Александр Менщиков <__Singleton__@hackerdom.ru> 徐俊杰 +林博仁 Buo-ren Lin diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/config/config.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/config/config.go index 910b3c006..cbb34486a 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/config/config.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/config/config.go @@ -58,7 +58,7 @@ func resetConfigDir() { // getHomeDir is a copy of [pkg/homedir.Get] to prevent adding docker/docker // as dependency for consumers that only need to read the config-file. // -// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v26.1.4+incompatible/pkg/homedir#Get +// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v28.0.3+incompatible/pkg/homedir#Get func getHomeDir() string { home, _ := os.UserHomeDir() if home == "" && runtime.GOOS != "windows" { @@ -69,6 +69,11 @@ func getHomeDir() string { return home } +// Provider defines an interface for providing the CLI config. +type Provider interface { + ConfigFile() *configfile.ConfigFile +} + // Dir returns the directory the configuration file is stored in func Dir() string { initConfigDir.Do(func() { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/config/configfile/file.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/config/configfile/file.go index ae9dcb337..24969ef69 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/config/configfile/file.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -36,12 +36,14 @@ type ConfigFile struct { NodesFormat string `json:"nodesFormat,omitempty"` PruneFilters []string `json:"pruneFilters,omitempty"` Proxies map[string]ProxyConfig `json:"proxies,omitempty"` - Experimental string `json:"experimental,omitempty"` CurrentContext string `json:"currentContext,omitempty"` CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` Plugins map[string]map[string]string `json:"plugins,omitempty"` Aliases map[string]string `json:"aliases,omitempty"` Features map[string]string `json:"features,omitempty"` + + // Deprecated: experimental CLI features are always enabled and this field is no longer used. Use [Features] instead for optional features. This field will be removed in a future release. + Experimental string `json:"experimental,omitempty"` } // ProxyConfig contains proxy configuration settings @@ -150,7 +152,8 @@ func (configFile *ConfigFile) Save() (retErr error) { return err } defer func() { - temp.Close() + // ignore error as the file may already be closed when we reach this. 
+ _ = temp.Close() if retErr != nil { if err := os.Remove(temp.Name()); err != nil { logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file") @@ -167,10 +170,16 @@ func (configFile *ConfigFile) Save() (retErr error) { return errors.Wrap(err, "error closing temp file") } - // Handle situation where the configfile is a symlink + // Handle situation where the configfile is a symlink, and allow for dangling symlinks cfgFile := configFile.Filename - if f, err := os.Readlink(cfgFile); err == nil { + if f, err := filepath.EvalSymlinks(cfgFile); err == nil { cfgFile = f + } else if os.IsNotExist(err) { + // extract the path from the error if the configfile does not exist or is a dangling symlink + var pathError *os.PathError + if errors.As(err, &pathError) { + cfgFile = pathError.Path + } } // Try copying the current config file (if any) ownership and permissions diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go index c100b97ee..4b04f8b39 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go @@ -28,7 +28,6 @@ import ( "syscall" "time" - "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -149,7 +148,7 @@ func (c *commandConn) handleEOF(err error) error { c.stderrMu.Lock() stderr := c.stderr.String() c.stderrMu.Unlock() - return errors.Errorf("command %v did not exit after %v: stderr=%q", c.cmd.Args, err, stderr) + return fmt.Errorf("command %v did not exit after %v: stderr=%q", c.cmd.Args, err, stderr) } } @@ -159,7 +158,7 @@ func (c *commandConn) handleEOF(err error) error { c.stderrMu.Lock() stderr := c.stderr.String() c.stderrMu.Unlock() - return errors.Errorf("command %v has exited with %v, make sure the URL is valid, and Docker 18.09 or later is installed on the remote host: stderr=%s", c.cmd.Args, werr, stderr) + return fmt.Errorf("command %v has exited with %v, make sure the URL is valid, and Docker 18.09 or later is installed on the remote host: stderr=%s", c.cmd.Args, werr, stderr) } func ignorableCloseError(err error) bool { @@ -253,17 +252,17 @@ func (c *commandConn) RemoteAddr() net.Addr { return c.remoteAddr } -func (c *commandConn) SetDeadline(t time.Time) error { +func (*commandConn) SetDeadline(t time.Time) error { logrus.Debugf("unimplemented call: SetDeadline(%v)", t) return nil } -func (c *commandConn) SetReadDeadline(t time.Time) error { +func (*commandConn) SetReadDeadline(t time.Time) error { logrus.Debugf("unimplemented call: SetReadDeadline(%v)", t) return nil } -func (c *commandConn) SetWriteDeadline(t time.Time) error { +func (*commandConn) SetWriteDeadline(t time.Time) error { logrus.Debugf("unimplemented call: SetWriteDeadline(%v)", t) return nil } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/connhelper.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/connhelper.go index 152d3e295..f4b938859 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/connhelper.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/connhelper.go @@ -3,13 +3,13 @@ package connhelper import ( "context" + "fmt" "net" "net/url" "strings" "github.com/docker/cli/cli/connhelper/commandconn" "github.com/docker/cli/cli/connhelper/ssh" - "github.com/pkg/errors" ) // ConnectionHelper allows to connect to a 
remote host with custom stream provider binary. @@ -41,9 +41,9 @@ func getConnectionHelper(daemonURL string, sshFlags []string) (*ConnectionHelper return nil, err } if u.Scheme == "ssh" { - sp, err := ssh.ParseURL(daemonURL) + sp, err := ssh.NewSpec(u) if err != nil { - return nil, errors.Wrap(err, "ssh host connection is not valid") + return nil, fmt.Errorf("ssh host connection is not valid: %w", err) } sshFlags = addSSHTimeout(sshFlags) sshFlags = disablePseudoTerminalAllocation(sshFlags) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go index fb4c91110..2c9d0d61b 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go @@ -2,19 +2,46 @@ package ssh import ( + "errors" + "fmt" "net/url" - - "github.com/pkg/errors" ) -// ParseURL parses URL +// ParseURL creates a [Spec] from the given ssh URL. It returns an error if +// the URL is using the wrong scheme, contains fragments, query-parameters, +// or contains a password. func ParseURL(daemonURL string) (*Spec, error) { u, err := url.Parse(daemonURL) if err != nil { - return nil, err + var urlErr *url.Error + if errors.As(err, &urlErr) { + err = urlErr.Unwrap() + } + return nil, fmt.Errorf("invalid SSH URL: %w", err) + } + return NewSpec(u) +} + +// NewSpec creates a [Spec] from the given ssh URL's properties. It returns +// an error if the URL is using the wrong scheme, contains fragments, +// query-parameters, or contains a password. +func NewSpec(sshURL *url.URL) (*Spec, error) { + s, err := newSpec(sshURL) + if err != nil { + return nil, fmt.Errorf("invalid SSH URL: %w", err) + } + return s, nil +} + +func newSpec(u *url.URL) (*Spec, error) { + if u == nil { + return nil, errors.New("URL is nil") + } + if u.Scheme == "" { + return nil, errors.New("no scheme provided") } if u.Scheme != "ssh" { - return nil, errors.Errorf("expected scheme ssh, got %q", u.Scheme) + return nil, errors.New("incorrect scheme: " + u.Scheme) } var sp Spec @@ -27,17 +54,18 @@ func ParseURL(daemonURL string) (*Spec, error) { } sp.Host = u.Hostname() if sp.Host == "" { - return nil, errors.Errorf("no host specified") + return nil, errors.New("hostname is empty") } sp.Port = u.Port() sp.Path = u.Path if u.RawQuery != "" { - return nil, errors.Errorf("extra query after the host: %q", u.RawQuery) + return nil, fmt.Errorf("query parameters are not allowed: %q", u.RawQuery) } if u.Fragment != "" { - return nil, errors.Errorf("extra fragment after the host: %q", u.Fragment) + return nil, fmt.Errorf("fragments are not allowed: %q", u.Fragment) } - return &sp, err + + return &sp, nil } // Spec of SSH URL diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker-credential-helpers/client/command.go b/src/cmd/linuxkit/vendor/github.com/docker/docker-credential-helpers/client/command.go index 1936234be..93863480b 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker-credential-helpers/client/command.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker-credential-helpers/client/command.go @@ -15,27 +15,30 @@ type Program interface { // ProgramFunc is a type of function that initializes programs based on arguments. type ProgramFunc func(args ...string) Program -// NewShellProgramFunc creates programs that are executed in a Shell. 
-func NewShellProgramFunc(name string) ProgramFunc { - return NewShellProgramFuncWithEnv(name, nil) -} - -// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables -func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc { +// NewShellProgramFunc creates a [ProgramFunc] to run command in a [Shell]. +func NewShellProgramFunc(command string) ProgramFunc { return func(args ...string) Program { - return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)} + return createProgramCmdRedirectErr(command, args, nil) } } -func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd { - programCmd := exec.Command(commandName, args...) +// NewShellProgramFuncWithEnv creates a [ProgramFunc] tu run command +// in a [Shell] with the given environment variables. +func NewShellProgramFuncWithEnv(command string, env *map[string]string) ProgramFunc { + return func(args ...string) Program { + return createProgramCmdRedirectErr(command, args, env) + } +} + +func createProgramCmdRedirectErr(command string, args []string, env *map[string]string) *Shell { + ec := exec.Command(command, args...) if env != nil { for k, v := range *env { - programCmd.Env = append(programCmd.Environ(), k+"="+v) + ec.Env = append(ec.Environ(), k+"="+v) } } - programCmd.Stderr = os.Stderr - return programCmd + ec.Stderr = os.Stderr + return &Shell{cmd: ec} } // Shell invokes shell commands to talk with a remote credentials-helper. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/AUTHORS b/src/cmd/linuxkit/vendor/github.com/docker/docker/AUTHORS index 88032defe..c7c649471 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/AUTHORS +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/AUTHORS @@ -2,6 +2,7 @@ # This file lists all contributors to the repository. # See hack/generate-authors.sh to make modifications. +17neverends 7sunarni <710720732@qq.com> Aanand Prasad Aarni Koskela @@ -189,6 +190,7 @@ Anes Hasicic Angel Velazquez Anil Belur Anil Madhavapeddy +Anirudh Aithal Ankit Jain Ankush Agarwal Anonmily @@ -227,7 +229,7 @@ Arun Gupta Asad Saeeduddin Asbjørn Enge Ashly Mathew -Austin Vazquez +Austin Vazquez averagehuman Avi Das Avi Kivity @@ -293,6 +295,7 @@ Brandon Liu Brandon Philips Brandon Rhodes Brendan Dixon +Brendon Smith Brennan Kinney <5098581+polarathene@users.noreply.github.com> Brent Salisbury Brett Higgins @@ -347,6 +350,7 @@ Casey Bisson Catalin Pirvu Ce Gao Cedric Davies +Cesar Talledo Cezar Sa Espinola Chad Swenson Chance Zibolski @@ -375,6 +379,7 @@ Chen Qiu Cheng-mean Liu Chengfei Shang Chengguang Xu +Chengyu Zhu Chentianze Chenyang Yan chenyuzhu @@ -1207,6 +1212,7 @@ K. Heller Kai Blin Kai Qiang Wu (Kennan) Kaijie Chen +Kaita Nakamura Kamil Domański Kamjar Gerami Kanstantsin Shautsou @@ -1281,6 +1287,7 @@ Krasi Georgiev Krasimir Georgiev Kris-Mikael Krister Kristian Haugene +Kristian Heljas Kristina Zabunova Krystian Wojcicki Kunal Kushwaha @@ -1482,6 +1489,7 @@ Matthias Kühnle Matthias Rampke Matthieu Fronton Matthieu Hauglustaine +Matthieu MOREL Mattias Jernberg Mauricio Garavaglia mauriyouth @@ -1712,6 +1720,7 @@ Patrick Hemmer Patrick St. 
laurent Patrick Stapleton Patrik Cyvoct +Patrik Leifert pattichen Paul "TBBle" Hampson Paul @@ -1870,6 +1879,7 @@ Robert Obryk Robert Schneider Robert Shade Robert Stern +Robert Sturla Robert Terhaar Robert Wallis Robert Wang diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/common.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/common.go index 2c62cd403..c375c68f2 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/common.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/common.go @@ -3,7 +3,7 @@ package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of the current REST API. - DefaultVersion = "1.48" + DefaultVersion = "1.50" // MinSupportedAPIVersion is the minimum API version that can be supported // by the API server, specified as "major.minor". Note that the daemon diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/swagger.yaml b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/swagger.yaml index d4e4f1197..619b4470e 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/swagger.yaml +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.48" +basePath: "/v1.50" info: title: "Docker Engine API" - version: "1.48" + version: "1.50" x-logo: url: "https://docs.docker.com/assets/images/logo-docker-main.png" description: | @@ -55,8 +55,8 @@ info: the URL is not supported by the daemon, a HTTP `400 Bad Request` error message is returned. - If you omit the version-prefix, the current version of the API (v1.48) is used. - For example, calling `/info` is the same as calling `/v1.48/info`. Using the + If you omit the version-prefix, the current version of the API (v1.50) is used. + For example, calling `/info` is the same as calling `/v1.50/info`. Using the API without a version-prefix is deprecated and will be removed in a future release. Engine releases in the near future should support this version of the API, @@ -965,13 +965,18 @@ definitions: ContainerIDFile: type: "string" description: "Path to a file where the container ID is written" + example: "" LogConfig: type: "object" description: "The logging configuration for this container" properties: Type: + description: |- + Name of the logging driver used for the container or "none" + if logging is disabled. type: "string" enum: + - "local" - "json-file" - "syslog" - "journald" @@ -982,9 +987,14 @@ definitions: - "etwlogs" - "none" Config: + description: |- + Driver-specific configuration options for the logging driver. type: "object" additionalProperties: type: "string" + example: + "max-file": "5" + "max-size": "10m" NetworkMode: type: "string" description: | @@ -1027,6 +1037,7 @@ definitions: items: type: "integer" minimum: 0 + example: [80, 64] Annotations: type: "object" description: | @@ -1129,7 +1140,8 @@ definitions: - `"host"`: use the host's PID namespace inside the container Privileged: type: "boolean" - description: "Gives the container full access to the host." + description: |- + Gives the container full access to the host. PublishAllPorts: type: "boolean" description: | @@ -1186,18 +1198,20 @@ definitions: minimum: 0 Sysctls: type: "object" - description: | + x-nullable: true + description: |- A list of kernel parameters (sysctls) to set in the container. - For example: - ``` - {"net.ipv4.ip_forward": "1"} - ``` + This field is omitted if not set. 
additionalProperties: type: "string" + example: + "net.ipv4.ip_forward": "1" Runtime: type: "string" - description: "Runtime to use with this container." + x-nullable: true + description: |- + Runtime to use with this container. # Applicable to Windows Isolation: type: "string" @@ -1215,6 +1229,18 @@ definitions: the default set of paths). items: type: "string" + example: + - "/proc/asound" + - "/proc/acpi" + - "/proc/kcore" + - "/proc/keys" + - "/proc/latency_stats" + - "/proc/timer_list" + - "/proc/timer_stats" + - "/proc/sched_debug" + - "/proc/scsi" + - "/sys/firmware" + - "/sys/devices/virtual/powercap" ReadonlyPaths: type: "array" description: | @@ -1222,6 +1248,12 @@ definitions: (this overrides the default set of paths). items: type: "string" + example: + - "/proc/bus" + - "/proc/fs" + - "/proc/irq" + - "/proc/sys" + - "/proc/sysrq-trigger" ContainerConfig: description: | @@ -1238,8 +1270,14 @@ definitions: The domain name to use for the container. type: "string" User: - description: "The user that commands are run as inside the container." + description: |- + Commands run as this user inside the container. If omitted, commands + run as the user specified in the image the container was started from. + + Can be either user-name or UID, and optional group-name or GID, + separated by a colon (`[<:group-name|GID>]`). type: "string" + example: "123:456" AttachStdin: description: "Whether to attach to `stdin`." type: "boolean" @@ -1390,63 +1428,10 @@ definitions: when starting a container from the image. type: "object" properties: - Hostname: - description: | - The hostname to use for the container, as a valid RFC 1123 hostname. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. - type: "string" - example: "" - Domainname: - description: | - The domain name to use for the container. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. - type: "string" - example: "" User: description: "The user that commands are run as inside the container." type: "string" example: "web:web" - AttachStdin: - description: | - Whether to attach to `stdin`. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. - type: "boolean" - default: false - example: false - AttachStdout: - description: | - Whether to attach to `stdout`. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. - type: "boolean" - default: false - example: false - AttachStderr: - description: | - Whether to attach to `stderr`. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. - type: "boolean" - default: false - example: false ExposedPorts: description: | An object mapping ports to an empty object in the form: @@ -1463,39 +1448,6 @@ definitions: "80/tcp": {}, "443/tcp": {} } - Tty: - description: | - Attach standard streams to a TTY, including `stdin` if it is not closed. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. - type: "boolean" - default: false - example: false - OpenStdin: - description: | - Open `stdin` - -


- - > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. - type: "boolean" - default: false - example: false - StdinOnce: - description: | - Close `stdin` after one attached client disconnects. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always false. It must not be used, and will be removed in API v1.48. - type: "boolean" - default: false - example: false Env: description: | A list of environment variables to set inside the container in the @@ -1521,18 +1473,6 @@ definitions: default: false example: false x-nullable: true - Image: - description: | - The name (or reference) of the image to use when creating the container, - or which was used when the container was created. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always empty. It must not be used, and will be removed in API v1.48. - type: "string" - default: "" - example: "" Volumes: description: | An object mapping mount point paths inside the container to empty @@ -1561,30 +1501,6 @@ definitions: items: type: "string" example: [] - NetworkDisabled: - description: | - Disable networking for the container. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. - type: "boolean" - default: false - example: false - x-nullable: true - MacAddress: - description: | - MAC address of the container. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. - type: "string" - default: "" - example: "" - x-nullable: true OnBuild: description: | `ONBUILD` metadata that were defined in the image's `Dockerfile`. @@ -1607,17 +1523,6 @@ definitions: type: "string" example: "SIGTERM" x-nullable: true - StopTimeout: - description: | - Timeout to stop a container in seconds. - -


- - > **Deprecated**: this field is not part of the image specification and is - > always omitted. It must not be used, and will be removed in API v1.48. - type: "integer" - default: 10 - x-nullable: true Shell: description: | Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell. @@ -1628,19 +1533,11 @@ definitions: example: ["/bin/sh", "-c"] # FIXME(thaJeztah): temporarily using a full example to remove some "omitempty" fields. Remove once the fields are removed. example: - "Hostname": "" - "Domainname": "" "User": "web:web" - "AttachStdin": false - "AttachStdout": false - "AttachStderr": false "ExposedPorts": { "80/tcp": {}, "443/tcp": {} } - "Tty": false - "OpenStdin": false - "StdinOnce": false "Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"] "Cmd": ["/bin/sh"] "Healthcheck": { @@ -1652,7 +1549,6 @@ definitions: "StartInterval": 0 } "ArgsEscaped": true - "Image": "" "Volumes": { "/app/data": {}, "/app/config": {} @@ -2918,6 +2814,23 @@ definitions: progressDetail: $ref: "#/definitions/ProgressDetail" + DeviceInfo: + type: "object" + description: | + DeviceInfo represents a device that can be used by a container. + properties: + Source: + type: "string" + example: "cdi" + description: | + The origin device driver. + ID: + type: "string" + example: "vendor.com/gpu=0" + description: | + The unique identifier for the device within its source driver. + For CDI devices, this would be an FQDN like "vendor.com/gpu=0". + ErrorDetail: type: "object" properties: @@ -5091,24 +5004,226 @@ definitions: Warnings: - "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + ContainerInspectResponse: + type: "object" + title: "ContainerInspectResponse" + x-go-name: "InspectResponse" + properties: + Id: + description: |- + The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). + type: "string" + x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" + Created: + description: |- + Date and time at which the container was created, formatted in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2025-02-17T17:43:39.64001363Z" + Path: + description: |- + The path to the command being run + type: "string" + example: "/bin/sh" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + example: + - "-c" + - "exit 9" + State: + $ref: "#/definitions/ContainerState" + Image: + description: |- + The ID (digest) of the image that this container was created from. + type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" + ResolvConfPath: + description: |- + Location of the `/etc/resolv.conf` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/resolv.conf" + HostnamePath: + description: |- + Location of the `/etc/hostname` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. 
+ type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hostname" + HostsPath: + description: |- + Location of the `/etc/hosts` generated for the container on the + host. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + example: "/var/lib/docker/containers/aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf/hosts" + LogPath: + description: |- + Location of the file used to buffer the container's logs. Depending on + the logging-driver used for the container, this field may be omitted. + + This file is managed through the docker daemon, and should not be + accessed or modified by other tools. + type: "string" + x-nullable: true + example: "/var/lib/docker/containers/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59/5b7c7e2b992aa426584ce6c47452756066be0e503a08b4516a433a54d2f69e59-json.log" + Name: + description: |- + The name associated with this container. + + For historic reasons, the name may be prefixed with a forward-slash (`/`). + type: "string" + example: "/funny_chatelet" + RestartCount: + description: |- + Number of times the container was restarted since it was created, + or since daemon was started. + type: "integer" + example: 0 + Driver: + description: |- + The storage-driver used for the container's filesystem (graph-driver + or snapshotter). + type: "string" + example: "overlayfs" + Platform: + description: |- + The platform (operating system) for which the container was created. + + This field was introduced for the experimental "LCOW" (Linux Containers + On Windows) features, which has been removed. In most cases, this field + is equal to the host's operating system (`linux` or `windows`). + type: "string" + example: "linux" + ImageManifestDescriptor: + $ref: "#/definitions/OCIDescriptor" + description: |- + OCI descriptor of the platform-specific manifest of the image + the container was created from. + + Note: Only available if the daemon provides a multi-platform + image store. + MountLabel: + description: |- + SELinux mount label set for the container. + type: "string" + example: "" + ProcessLabel: + description: |- + SELinux process label set for the container. + type: "string" + example: "" + AppArmorProfile: + description: |- + The AppArmor profile set for the container. + type: "string" + example: "" + ExecIDs: + description: |- + IDs of exec instances that are running in the container. + type: "array" + items: + type: "string" + x-nullable: true + example: + - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" + - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/DriverData" + SizeRw: + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "122880" + SizeRootFs: + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. + type: "integer" + format: "int64" + x-nullable: true + example: "1653948416" + Mounts: + description: |- + List of mounts used by the container. 
+ type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/ContainerConfig" + NetworkSettings: + $ref: "#/definitions/NetworkSettings" + ContainerSummary: type: "object" properties: Id: - description: "The ID of this container" + description: |- + The ID of this container as a 128-bit (64-character) hexadecimal string (32 bytes). type: "string" x-go-name: "ID" + minLength: 64 + maxLength: 64 + pattern: "^[0-9a-fA-F]{64}$" + example: "aa86eacfb3b3ed4cd362c1e88fc89a53908ad05fb3a4103bca3f9b28292d14bf" Names: - description: "The names that this container has been given" + description: |- + The names associated with this container. Most containers have a single + name, but when using legacy "links", the container can have multiple + names. + + For historic reasons, names are prefixed with a forward-slash (`/`). type: "array" items: type: "string" + example: + - "/funny_chatelet" Image: - description: "The name of the image used when creating this container" + description: |- + The name or ID of the image used to create the container. + + This field shows the image reference as was specified when creating the container, + which can be in its canonical form (e.g., `docker.io/library/ubuntu:latest` + or `docker.io/library/ubuntu@sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`), + short form (e.g., `ubuntu:latest`)), or the ID(-prefix) of the image (e.g., `72297848456d`). + + The content of this field can be updated at runtime if the image used to + create the container is untagged, in which case the field is updated to + contain the the image ID (digest) it was resolved to in its canonical, + non-truncated form (e.g., `sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782`). type: "string" + example: "docker.io/library/ubuntu:latest" ImageID: - description: "The ID of the image that this container was created from" + description: |- + The ID (digest) of the image that this container was created from. type: "string" + example: "sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782" ImageManifestDescriptor: $ref: "#/definitions/OCIDescriptor" x-nullable: true @@ -5123,55 +5238,112 @@ definitions: Command: description: "Command to run when starting the container" type: "string" + example: "/bin/bash" Created: - description: "When the container was created" + description: |- + Date and time at which the container was created as a Unix timestamp + (number of seconds since EPOCH). type: "integer" format: "int64" + example: "1739811096" Ports: - description: "The ports exposed by this container" + description: |- + Port-mappings for the container. type: "array" items: $ref: "#/definitions/Port" SizeRw: - description: "The size of files that have been created or changed by this container" + description: |- + The size of files that have been created or changed by this container. + + This field is omitted by default, and only set when size is requested + in the API request. type: "integer" format: "int64" + x-nullable: true + example: "122880" SizeRootFs: - description: "The total size of all the files in this container" + description: |- + The total size of all files in the read-only layers from the image + that the container uses. These layers can be shared between containers. + + This field is omitted by default, and only set when size is requested + in the API request. type: "integer" format: "int64" + x-nullable: true + example: "1653948416" Labels: description: "User-defined key/value metadata." 
type: "object" additionalProperties: type: "string" + example: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" State: - description: "The state of this container (e.g. `Exited`)" + description: | + The state of this container. type: "string" + enum: + - "created" + - "running" + - "paused" + - "restarting" + - "exited" + - "removing" + - "dead" + example: "running" Status: - description: "Additional human-readable status of this container (e.g. `Exit 0`)" + description: |- + Additional human-readable status of this container (e.g. `Exit 0`) type: "string" + example: "Up 4 days" HostConfig: type: "object" + description: |- + Summary of host-specific runtime information of the container. This + is a reduced set of information in the container's "HostConfig" as + available in the container "inspect" response. properties: NetworkMode: + description: |- + Networking mode (`host`, `none`, `container:`) or name of the + primary network the container is using. + + This field is primarily for backward compatibility. The container + can be connected to multiple networks for which information can be + found in the `NetworkSettings.Networks` field, which enumerates + settings per network. type: "string" + example: "mynetwork" Annotations: - description: "Arbitrary key-value metadata attached to container" + description: |- + Arbitrary key-value metadata attached to the container. type: "object" x-nullable: true additionalProperties: type: "string" + example: + io.kubernetes.docker.type: "container" + io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" NetworkSettings: - description: "A summary of the container's network settings" + description: |- + Summary of the container's network settings type: "object" properties: Networks: type: "object" + description: |- + Summary of network-settings for each network the container is + attached to. additionalProperties: $ref: "#/definitions/EndpointSettings" Mounts: type: "array" + description: |- + List of mounts used by the container. items: $ref: "#/definitions/MountPoint" @@ -5211,8 +5383,11 @@ definitions: com.example.some-other-label: "some-other-value" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - data to store as secret. + Data is the data to store as a secret, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + It must be empty if the Driver field is set, in which case the data is + loaded from an external secret store. The maximum allowed size is 500KB, + as defined in [MaxSecretSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize). This field is only used to _create_ a secret, and is not returned by other endpoints. @@ -5263,8 +5438,9 @@ definitions: type: "string" Data: description: | - Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) - config data. + Data is the data to store as a config, formatted as a Base64-url-safe-encoded + ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) string. + The maximum allowed size is 1000KB, as defined in [MaxConfigSize](https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize). 
type: "string" Templating: description: | @@ -6555,6 +6731,17 @@ definitions: description: "The network pool size" type: "integer" example: "24" + FirewallBackend: + $ref: "#/definitions/FirewallInfo" + DiscoveredDevices: + description: | + List of devices discovered by device drivers. + + Each device includes information about its source driver, kind, name, + and additional driver-specific attributes. + type: "array" + items: + $ref: "#/definitions/DeviceInfo" Warnings: description: | List of warnings / informational messages about missing features, or @@ -6638,6 +6825,37 @@ definitions: default: "plugins.moby" example: "plugins.moby" + FirewallInfo: + description: | + Information about the daemon's firewalling configuration. + + This field is currently only used on Linux, and omitted on other platforms. + type: "object" + x-nullable: true + properties: + Driver: + description: | + The name of the firewall backend driver. + type: "string" + example: "nftables" + Info: + description: | + Information about the firewall backend, provided as + "label" / "value" pairs. + +


+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["ReloadedAt", "2025-01-01T00:00:00Z"] + # PluginsInfo is a temp struct holding Plugins name # registered with docker daemon. It is used by Info struct PluginsInfo: @@ -6683,32 +6901,6 @@ definitions: type: "object" x-nullable: true properties: - AllowNondistributableArtifactsCIDRs: - description: | - List of IP ranges to which nondistributable artifacts can be pushed, - using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). - -


- - > **Deprecated**: Pushing nondistributable artifacts is now always enabled - > and this field is always `null`. This field will be removed in a API v1.49. - type: "array" - items: - type: "string" - example: [] - AllowNondistributableArtifactsHostnames: - description: | - List of registry hostnames to which nondistributable artifacts can be - pushed, using the format `[:]` or `[:]`. - -


- - > **Deprecated**: Pushing nondistributable artifacts is now always enabled - > and this field is always `null`. This field will be removed in a API v1.49. - type: "array" - items: - type: "string" - example: [] InsecureRegistryCIDRs: description: | List of IP ranges of insecure registries, using the CIDR syntax @@ -6878,13 +7070,6 @@ definitions: description: "Actual commit ID of external tool." type: "string" example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" - Expected: - description: | - Commit ID of external tool expected by dockerd as set at build time. - - **Deprecated**: This field is deprecated and will be omitted in a API v1.49. - type: "string" - example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" SwarmInfo: description: | @@ -7053,7 +7238,7 @@ definitions: description: | The media type of the object this schema refers to. type: "string" - example: "application/vnd.docker.distribution.manifest.v2+json" + example: "application/vnd.oci.image.manifest.v1+json" digest: description: | The digest of the targeted content. @@ -7064,27 +7249,52 @@ definitions: The size in bytes of the blob. type: "integer" format: "int64" - example: 3987495 - # TODO Not yet including these fields for now, as they are nil / omitted in our response. - # urls: - # description: | - # List of URLs from which this object MAY be downloaded. - # type: "array" - # items: - # type: "string" - # format: "uri" - # annotations: - # description: | - # Arbitrary metadata relating to the targeted content. - # type: "object" - # additionalProperties: - # type: "string" - # platform: - # $ref: "#/definitions/OCIPlatform" + example: 424 + urls: + description: |- + List of URLs from which this object MAY be downloaded. + type: "array" + items: + type: "string" + format: "uri" + x-nullable: true + annotations: + description: |- + Arbitrary metadata relating to the targeted content. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + "com.docker.official-images.bashbrew.arch": "amd64" + "org.opencontainers.image.base.digest": "sha256:0d0ef5c914d3ea700147da1bd050c59edb8bb12ca312f3800b29d7c8087eabd8" + "org.opencontainers.image.base.name": "scratch" + "org.opencontainers.image.created": "2025-01-27T00:00:00Z" + "org.opencontainers.image.revision": "9fabb4bad5138435b01857e2fe9363e2dc5f6a79" + "org.opencontainers.image.source": "https://git.launchpad.net/cloud-images/+oci/ubuntu-base" + "org.opencontainers.image.url": "https://hub.docker.com/_/ubuntu" + "org.opencontainers.image.version": "24.04" + data: + type: string + x-nullable: true + description: |- + Data is an embedding of the targeted content. This is encoded as a base64 + string when marshalled to JSON (automatically, by encoding/json). If + present, Data can be used directly to avoid fetching the targeted content. + example: null + platform: + $ref: "#/definitions/OCIPlatform" + artifactType: + description: |- + ArtifactType is the IANA media type of this artifact. + type: "string" + x-nullable: true + example: null OCIPlatform: type: "object" x-go-name: Platform + x-nullable: true description: | Describes the platform which the image in the manifest runs on, as defined in the [OCI Image Index Specification](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-index.md). 
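For reference, the expanded OCIDescriptor definition above lines up with the Descriptor type in opencontainers/image-spec v1.1, which is already in this module set per go.mod. A minimal, illustrative Go sketch — not part of this patch — using the digest and size values taken from the swagger examples above:

package main

import (
	"encoding/json"
	"fmt"

	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Illustrative values copied from the swagger examples in this diff.
	desc := ocispec.Descriptor{
		MediaType: "application/vnd.oci.image.manifest.v1+json",
		Digest:    digest.Digest("sha256:72297848456d5d37d1262630108ab308d3e9ec7ed1c3286a32fe09856619a782"),
		Size:      424,
		Annotations: map[string]string{
			"org.opencontainers.image.version": "24.04",
		},
		Platform: &ocispec.Platform{OS: "linux", Architecture: "amd64"},
	}
	out, err := json.MarshalIndent(desc, "", "  ")
	if err != nil {
		panic(err)
	}
	// Prints a descriptor in which the newly documented optional fields
	// (urls, annotations, data, platform, artifactType) are omitted when empty.
	fmt.Println(string(out))
}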
@@ -7554,143 +7764,6 @@ paths: type: "array" items: $ref: "#/definitions/ContainerSummary" - examples: - application/json: - - Id: "8dfafdbc3a40" - Names: - - "/boring_feynman" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 1" - Created: 1367854155 - State: "Exited" - Status: "Exit 0" - Ports: - - PrivatePort: 2222 - PublicPort: 3333 - Type: "tcp" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - Annotations: - io.kubernetes.docker.type: "container" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:02" - Mounts: - - Name: "fac362...80535" - Source: "/data" - Destination: "/data" - Driver: "local" - Mode: "ro,Z" - RW: false - Propagation: "" - - Id: "9cd87474be90" - Names: - - "/coolName" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 222222" - Created: 1367854155 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - Annotations: - io.kubernetes.docker.type: "container" - io.kubernetes.sandbox.id: "3befe639bed0fd6afdd65fd1fa84506756f59360ec4adc270b0fdac9be22b4d3" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.8" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:08" - Mounts: [] - - Id: "3176a2479c92" - Names: - - "/sleepy_dog" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 3333333333333333" - Created: 1367854154 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - Annotations: - io.kubernetes.image.id: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - io.kubernetes.image.name: "ubuntu:latest" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.6" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:06" - Mounts: [] - - Id: "4cb07b47f9fb" - Names: - - "/running_cat" - Image: "ubuntu:latest" - ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" - Command: "echo 444444444444444444444444444444444" - Created: 1367854152 - State: "Exited" - Status: "Exit 0" - Ports: [] - Labels: {} - SizeRw: 12288 - SizeRootFs: 0 - HostConfig: - NetworkMode: "default" - Annotations: - io.kubernetes.config.source: "api" - NetworkSettings: - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" - Gateway: "172.17.0.1" - 
IPAddress: "172.17.0.5" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:11:00:05" - Mounts: [] 400: description: "bad parameter" schema: @@ -7915,246 +7988,7 @@ paths: 200: description: "no error" schema: - type: "object" - title: "ContainerInspectResponse" - properties: - Id: - description: "The ID of the container" - type: "string" - Created: - description: "The time the container was created" - type: "string" - Path: - description: "The path to the command being run" - type: "string" - Args: - description: "The arguments to the command being run" - type: "array" - items: - type: "string" - State: - $ref: "#/definitions/ContainerState" - Image: - description: "The container's image ID" - type: "string" - ResolvConfPath: - type: "string" - HostnamePath: - type: "string" - HostsPath: - type: "string" - LogPath: - type: "string" - Name: - type: "string" - RestartCount: - type: "integer" - Driver: - type: "string" - Platform: - type: "string" - ImageManifestDescriptor: - $ref: "#/definitions/OCIDescriptor" - description: | - OCI descriptor of the platform-specific manifest of the image - the container was created from. - - Note: Only available if the daemon provides a multi-platform - image store. - MountLabel: - type: "string" - ProcessLabel: - type: "string" - AppArmorProfile: - type: "string" - ExecIDs: - description: "IDs of exec instances that are running in the container." - type: "array" - items: - type: "string" - x-nullable: true - HostConfig: - $ref: "#/definitions/HostConfig" - GraphDriver: - $ref: "#/definitions/DriverData" - SizeRw: - description: | - The size of files that have been created or changed by this - container. - type: "integer" - format: "int64" - SizeRootFs: - description: "The total size of all the files in this container." 
- type: "integer" - format: "int64" - Mounts: - type: "array" - items: - $ref: "#/definitions/MountPoint" - Config: - $ref: "#/definitions/ContainerConfig" - NetworkSettings: - $ref: "#/definitions/NetworkSettings" - examples: - application/json: - AppArmorProfile: "" - Args: - - "-c" - - "exit 9" - Config: - AttachStderr: true - AttachStdin: false - AttachStdout: true - Cmd: - - "/bin/sh" - - "-c" - - "exit 9" - Domainname: "" - Env: - - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - Healthcheck: - Test: ["CMD-SHELL", "exit 0"] - Hostname: "ba033ac44011" - Image: "ubuntu" - Labels: - com.example.vendor: "Acme" - com.example.license: "GPL" - com.example.version: "1.0" - MacAddress: "" - NetworkDisabled: false - OpenStdin: false - StdinOnce: false - Tty: false - User: "" - Volumes: - /volumes/data: {} - WorkingDir: "" - StopSignal: "SIGTERM" - StopTimeout: 10 - Created: "2015-01-06T15:47:31.485331387Z" - Driver: "overlay2" - ExecIDs: - - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" - - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" - HostConfig: - MaximumIOps: 0 - MaximumIOBps: 0 - BlkioWeight: 0 - BlkioWeightDevice: - - {} - BlkioDeviceReadBps: - - {} - BlkioDeviceWriteBps: - - {} - BlkioDeviceReadIOps: - - {} - BlkioDeviceWriteIOps: - - {} - ContainerIDFile: "" - CpusetCpus: "" - CpusetMems: "" - CpuPercent: 80 - CpuShares: 0 - CpuPeriod: 100000 - CpuRealtimePeriod: 1000000 - CpuRealtimeRuntime: 10000 - Devices: [] - DeviceRequests: - - Driver: "nvidia" - Count: -1 - DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] - Capabilities: [["gpu", "nvidia", "compute"]] - Options: - property1: "string" - property2: "string" - IpcMode: "" - Memory: 0 - MemorySwap: 0 - MemoryReservation: 0 - OomKillDisable: false - OomScoreAdj: 500 - NetworkMode: "bridge" - PidMode: "" - PortBindings: {} - Privileged: false - ReadonlyRootfs: false - PublishAllPorts: false - RestartPolicy: - MaximumRetryCount: 2 - Name: "on-failure" - LogConfig: - Type: "json-file" - Sysctls: - net.ipv4.ip_forward: "1" - Ulimits: - - {} - VolumeDriver: "" - ShmSize: 67108864 - HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" - HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" - LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" - Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" - Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" - MountLabel: "" - Name: "/boring_euclid" - NetworkSettings: - Bridge: "" - SandboxID: "" - HairpinMode: false - LinkLocalIPv6Address: "" - LinkLocalIPv6PrefixLen: 0 - SandboxKey: "" - EndpointID: "" - Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - IPAddress: "" - IPPrefixLen: 0 - IPv6Gateway: "" - MacAddress: "" - Networks: - bridge: - NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" - EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" - Gateway: "172.17.0.1" - IPAddress: "172.17.0.2" - IPPrefixLen: 16 - IPv6Gateway: "" - GlobalIPv6Address: "" - GlobalIPv6PrefixLen: 0 - MacAddress: "02:42:ac:12:00:02" - Path: "/bin/sh" - ProcessLabel: "" - ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" 
- RestartCount: 1 - State: - Error: "" - ExitCode: 9 - FinishedAt: "2015-01-06T15:47:32.080254511Z" - Health: - Status: "healthy" - FailingStreak: 0 - Log: - - Start: "2019-12-22T10:59:05.6385933Z" - End: "2019-12-22T10:59:05.8078452Z" - ExitCode: 0 - Output: "" - OOMKilled: false - Dead: false - Paused: false - Pid: 0 - Restarting: false - Running: true - StartedAt: "2015-01-06T15:47:32.072697474Z" - Status: "running" - Mounts: - - Name: "fac362...80535" - Source: "/data" - Destination: "/data" - Driver: "local" - Mode: "ro,Z" - RW: false - Propagation: "" + $ref: "#/definitions/ContainerInspectResponse" 404: description: "no such container" schema: @@ -9984,6 +9818,18 @@ paths: description: "Do not delete untagged parent images" type: "boolean" default: false + - name: "platforms" + in: "query" + description: | + Select platform-specific content to delete. + Multiple values are accepted. + Each platform is a OCI platform encoded as a JSON string. + type: "array" + items: + # This should be OCIPlatform + # but $ref is not supported for array in query in Swagger 2.0 + # $ref: "#/definitions/OCIPlatform" + type: "string" tags: ["Image"] /images/search: get: @@ -10541,13 +10387,9 @@ paths: ### Image tarball format - An image tarball contains one directory per image layer (named using its long ID), each containing these files: + An image tarball contains [Content as defined in the OCI Image Layout Specification](https://github.com/opencontainers/image-spec/blob/v1.1.1/image-layout.md#content). - - `VERSION`: currently `1.0` - the file format version - - `json`: detailed layer information, similar to `docker inspect layer_id` - - `layer.tar`: A tarfile containing the filesystem changes in this layer - - The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + Additionally, includes the manifest.json file associated with a backwards compatible docker save format. If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. @@ -10587,6 +10429,7 @@ paths: If not provided, the full multi-platform image will be saved. Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` + tags: ["Image"] /images/get: get: summary: "Export several images" @@ -10621,6 +10464,16 @@ paths: type: "array" items: type: "string" + - name: "platform" + type: "string" + in: "query" + description: | + JSON encoded OCI platform describing a platform which will be used + to select a platform-specific image to be saved if the image is + multi-platform. + If not provided, the full multi-platform image will be saved. + + Example: `{"os": "linux", "architecture": "arm", "variant": "v5"}` tags: ["Image"] /images/load: post: @@ -10719,6 +10572,7 @@ paths: items: type: "integer" minimum: 0 + example: [80, 64] DetachKeys: type: "string" description: | @@ -10805,9 +10659,11 @@ paths: Detach: type: "boolean" description: "Detach from the command." + example: false Tty: type: "boolean" description: "Allocate a pseudo-TTY." + example: true ConsoleSize: type: "array" description: "Initial console size, as an `[height, width]` array." 
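For reference, the new platform (image export) and platforms (image delete) query parameters introduced above take OCI platforms encoded as JSON strings. A minimal, illustrative Go sketch — not part of this patch — of how a caller might build such a query string, assuming the image-spec Platform type; the /v1.50 path in the comment comes from the updated basePath in this swagger:

package main

import (
	"encoding/json"
	"fmt"
	"net/url"

	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Matches the swagger example: {"os": "linux", "architecture": "arm", "variant": "v5"}
	p, err := json.Marshal(ocispec.Platform{OS: "linux", Architecture: "arm", Variant: "v5"})
	if err != nil {
		panic(err)
	}
	q := url.Values{}
	q.Set("platform", string(p))
	// The encoded query would be appended to e.g. GET /v1.50/images/{name}/get.
	fmt.Println(q.Encode())
}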
@@ -10817,10 +10673,7 @@ paths: items: type: "integer" minimum: 0 - example: - Detach: false - Tty: true - ConsoleSize: [80, 64] + example: [80, 64] - name: "id" in: "path" description: "Exec instance ID" diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/build/build.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/build/build.go new file mode 100644 index 000000000..c43a0e21e --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/build/build.go @@ -0,0 +1,91 @@ +package build + +import ( + "io" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/registry" +) + +// BuilderVersion sets the version of underlying builder to use +type BuilderVersion string + +const ( + // BuilderV1 is the first generation builder in docker daemon + BuilderV1 BuilderVersion = "1" + // BuilderBuildKit is builder based on moby/buildkit project + BuilderBuildKit BuilderVersion = "2" +) + +// Result contains the image id of a successful build. +type Result struct { + ID string +} + +// ImageBuildOptions holds the information +// necessary to build images. +type ImageBuildOptions struct { + Tags []string + SuppressOutput bool + RemoteContext string + NoCache bool + Remove bool + ForceRemove bool + PullParent bool + Isolation container.Isolation + CPUSetCPUs string + CPUSetMems string + CPUShares int64 + CPUQuota int64 + CPUPeriod int64 + Memory int64 + MemorySwap int64 + CgroupParent string + NetworkMode string + ShmSize int64 + Dockerfile string + Ulimits []*container.Ulimit + // BuildArgs needs to be a *string instead of just a string so that + // we can tell the difference between "" (empty string) and no value + // at all (nil). See the parsing of buildArgs in + // api/server/router/build/build_routes.go for even more info. + BuildArgs map[string]*string + AuthConfigs map[string]registry.AuthConfig + Context io.Reader + Labels map[string]string + // squash the resulting image's layers to the parent + // preserves the original image and creates a new one from the parent with all + // the changes applied to a single layer + Squash bool + // CacheFrom specifies images that are used for matching cache. Images + // specified here do not need to have a valid parent chain to match cache. + CacheFrom []string + SecurityOpt []string + ExtraHosts []string // List of extra hosts + Target string + SessionID string + Platform string + // Version specifies the version of the underlying builder to use + Version BuilderVersion + // BuildID is an optional identifier that can be passed together with the + // build request. The same identifier can be used to gracefully cancel the + // build with the cancel request. + BuildID string + // Outputs defines configurations for exporting build results. Only supported + // in BuildKit mode + Outputs []ImageBuildOutput +} + +// ImageBuildOutput defines configuration for exporting a build result +type ImageBuildOutput struct { + Type string + Attrs map[string]string +} + +// ImageBuildResponse holds information +// returned by a server after building +// an image. 
+type ImageBuildResponse struct { + Body io.ReadCloser + OSType string +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/build/cache.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/build/cache.go new file mode 100644 index 000000000..42c840457 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/build/cache.go @@ -0,0 +1,52 @@ +package build + +import ( + "time" + + "github.com/docker/docker/api/types/filters" +) + +// CacheRecord contains information about a build cache record. +type CacheRecord struct { + // ID is the unique ID of the build cache record. + ID string + // Parent is the ID of the parent build cache record. + // + // Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead. + Parent string `json:"Parent,omitempty"` + // Parents is the list of parent build cache record IDs. + Parents []string `json:" Parents,omitempty"` + // Type is the cache record type. + Type string + // Description is a description of the build-step that produced the build cache. + Description string + // InUse indicates if the build cache is in use. + InUse bool + // Shared indicates if the build cache is shared. + Shared bool + // Size is the amount of disk space used by the build cache (in bytes). + Size int64 + // CreatedAt is the date and time at which the build cache was created. + CreatedAt time.Time + // LastUsedAt is the date and time at which the build cache was last used. + LastUsedAt *time.Time + UsageCount int +} + +// CachePruneOptions hold parameters to prune the build cache. +type CachePruneOptions struct { + All bool + ReservedSpace int64 + MaxUsedSpace int64 + MinFreeSpace int64 + Filters filters.Args + + KeepStorage int64 // Deprecated: deprecated in API 1.48. +} + +// CachePruneReport contains the response for Engine API: +// POST "/build/prune" +type CachePruneReport struct { + CachesDeleted []string + SpaceReclaimed uint64 +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/build/disk_usage.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/build/disk_usage.go new file mode 100644 index 000000000..e969b6d61 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/build/disk_usage.go @@ -0,0 +1,8 @@ +package build + +// CacheDiskUsage contains disk usage for the build cache. +type CacheDiskUsage struct { + TotalSize int64 + Reclaimable int64 + Items []*CacheRecord +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/client.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/client.go index dce8260f3..a3704edd3 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/client.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/client.go @@ -3,12 +3,7 @@ package types // import "github.com/docker/docker/api/types" import ( "bufio" "context" - "io" "net" - - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/registry" ) // NewHijackedResponse initializes a [HijackedResponse] type. @@ -51,165 +46,6 @@ func (h *HijackedResponse) CloseWrite() error { return nil } -// ImageBuildOptions holds the information -// necessary to build images. 
-type ImageBuildOptions struct { - Tags []string - SuppressOutput bool - RemoteContext string - NoCache bool - Remove bool - ForceRemove bool - PullParent bool - Isolation container.Isolation - CPUSetCPUs string - CPUSetMems string - CPUShares int64 - CPUQuota int64 - CPUPeriod int64 - Memory int64 - MemorySwap int64 - CgroupParent string - NetworkMode string - ShmSize int64 - Dockerfile string - Ulimits []*container.Ulimit - // BuildArgs needs to be a *string instead of just a string so that - // we can tell the difference between "" (empty string) and no value - // at all (nil). See the parsing of buildArgs in - // api/server/router/build/build_routes.go for even more info. - BuildArgs map[string]*string - AuthConfigs map[string]registry.AuthConfig - Context io.Reader - Labels map[string]string - // squash the resulting image's layers to the parent - // preserves the original image and creates a new one from the parent with all - // the changes applied to a single layer - Squash bool - // CacheFrom specifies images that are used for matching cache. Images - // specified here do not need to have a valid parent chain to match cache. - CacheFrom []string - SecurityOpt []string - ExtraHosts []string // List of extra hosts - Target string - SessionID string - Platform string - // Version specifies the version of the underlying builder to use - Version BuilderVersion - // BuildID is an optional identifier that can be passed together with the - // build request. The same identifier can be used to gracefully cancel the - // build with the cancel request. - BuildID string - // Outputs defines configurations for exporting build results. Only supported - // in BuildKit mode - Outputs []ImageBuildOutput -} - -// ImageBuildOutput defines configuration for exporting a build result -type ImageBuildOutput struct { - Type string - Attrs map[string]string -} - -// BuilderVersion sets the version of underlying builder to use -type BuilderVersion string - -const ( - // BuilderV1 is the first generation builder in docker daemon - BuilderV1 BuilderVersion = "1" - // BuilderBuildKit is builder based on moby/buildkit project - BuilderBuildKit BuilderVersion = "2" -) - -// ImageBuildResponse holds information -// returned by a server after building -// an image. -type ImageBuildResponse struct { - Body io.ReadCloser - OSType string -} - -// NodeListOptions holds parameters to list nodes with. -type NodeListOptions struct { - Filters filters.Args -} - -// NodeRemoveOptions holds parameters to remove nodes with. -type NodeRemoveOptions struct { - Force bool -} - -// ServiceCreateOptions contains the options to use when creating a service. -type ServiceCreateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool -} - -// Values for RegistryAuthFrom in ServiceUpdateOptions -const ( - RegistryAuthFromSpec = "spec" - RegistryAuthFromPreviousSpec = "previous-spec" -) - -// ServiceUpdateOptions contains the options to be used for updating services. 
-type ServiceUpdateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate - // into this field. While it does open API users up to racy writes, most - // users may not need that level of consistency in practice. - - // RegistryAuthFrom specifies where to find the registry authorization - // credentials if they are not given in EncodedRegistryAuth. Valid - // values are "spec" and "previous-spec". - RegistryAuthFrom string - - // Rollback indicates whether a server-side rollback should be - // performed. When this is set, the provided spec will be ignored. - // The valid values are "previous" and "none". An empty value is the - // same as "none". - Rollback string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool -} - -// ServiceListOptions holds parameters to list services with. -type ServiceListOptions struct { - Filters filters.Args - - // Status indicates whether the server should include the service task - // count of running and desired tasks. - Status bool -} - -// ServiceInspectOptions holds parameters related to the "service inspect" -// operation. -type ServiceInspectOptions struct { - InsertDefaults bool -} - -// TaskListOptions holds parameters to list tasks with. -type TaskListOptions struct { - Filters filters.Args -} - // PluginRemoveOptions holds parameters to remove plugins. type PluginRemoveOptions struct { Force bool @@ -243,13 +79,6 @@ type PluginInstallOptions struct { Args []string } -// SwarmUnlockKeyResponse contains the response for Engine API: -// GET /swarm/unlockkey -type SwarmUnlockKeyResponse struct { - // UnlockKey is the unlock key in ASCII-armored format. - UnlockKey string -} - // PluginCreateOptions hold all options to plugin create. type PluginCreateOptions struct { RepoName string diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container.go index 65fabbf42..a191ca8bd 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/container.go @@ -104,7 +104,7 @@ type MountPoint struct { // State stores container's running state // it's part of ContainerJSONBase and returned by "inspect" command type State struct { - Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" + Status ContainerState // String representation of the container state. 
Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" Running bool Paused bool Restarting bool @@ -132,7 +132,7 @@ type Summary struct { SizeRw int64 `json:",omitempty"` SizeRootFs int64 `json:",omitempty"` Labels map[string]string - State string + State ContainerState Status string HostConfig struct { NetworkMode string `json:",omitempty"` diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/disk_usage.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/disk_usage.go new file mode 100644 index 000000000..05b6cbe9c --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/disk_usage.go @@ -0,0 +1,8 @@ +package container + +// DiskUsage contains disk usage for containers. +type DiskUsage struct { + TotalSize int64 + Reclaimable int64 + Items []*Summary +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/health.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/health.go index 93663746f..96e91cc8d 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/health.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/health.go @@ -1,18 +1,27 @@ package container -import "time" +import ( + "fmt" + "strings" + "time" +) + +// HealthStatus is a string representation of the container's health. +// +// It currently is an alias for string, but may become a distinct type in future. +type HealthStatus = string // Health states const ( - NoHealthcheck = "none" // Indicates there is no healthcheck - Starting = "starting" // Starting indicates that the container is not yet ready - Healthy = "healthy" // Healthy indicates that the container is running correctly - Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem + NoHealthcheck HealthStatus = "none" // Indicates there is no healthcheck + Starting HealthStatus = "starting" // Starting indicates that the container is not yet ready + Healthy HealthStatus = "healthy" // Healthy indicates that the container is running correctly + Unhealthy HealthStatus = "unhealthy" // Unhealthy indicates that the container has a problem ) // Health stores information about the container's healthcheck results type Health struct { - Status string // Status is one of [Starting], [Healthy] or [Unhealthy]. + Status HealthStatus // Status is one of [Starting], [Healthy] or [Unhealthy]. FailingStreak int // FailingStreak is the number of consecutive failures Log []*HealthcheckResult // Log contains the last few results (oldest first) } @@ -24,3 +33,18 @@ type HealthcheckResult struct { ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe Output string // Output from last check } + +var validHealths = []string{ + NoHealthcheck, Starting, Healthy, Unhealthy, +} + +// ValidateHealthStatus checks if the provided string is a valid +// container [HealthStatus]. 
+func ValidateHealthStatus(s HealthStatus) error { + switch s { + case NoHealthcheck, Starting, Healthy, Unhealthy: + return nil + default: + return errInvalidParameter{error: fmt.Errorf("invalid value for health (%s): must be one of %s", s, strings.Join(validHealths, ", "))} + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/hostconfig.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/hostconfig.go index 83198305e..87ca82683 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/hostconfig.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/hostconfig.go @@ -145,7 +145,7 @@ func (n NetworkMode) IsDefault() bool { // IsPrivate indicates whether container uses its private network stack. func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) + return !n.IsHost() && !n.IsContainer() } // IsContainer indicates whether container uses a container network stack. @@ -230,7 +230,7 @@ type PidMode string // IsPrivate indicates whether the container uses its own new pid namespace. func (n PidMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) + return !n.IsHost() && !n.IsContainer() } // IsHost indicates whether the container uses the host's pid namespace. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/state.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/state.go new file mode 100644 index 000000000..78d5c4fe8 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/container/state.go @@ -0,0 +1,64 @@ +package container + +import ( + "fmt" + "strings" +) + +// ContainerState is a string representation of the container's current state. +// +// It currently is an alias for string, but may become a distinct type in the future. +type ContainerState = string + +const ( + StateCreated ContainerState = "created" // StateCreated indicates the container is created, but not (yet) started. + StateRunning ContainerState = "running" // StateRunning indicates that the container is running. + StatePaused ContainerState = "paused" // StatePaused indicates that the container's current state is paused. + StateRestarting ContainerState = "restarting" // StateRestarting indicates that the container is currently restarting. + StateRemoving ContainerState = "removing" // StateRemoving indicates that the container is being removed. + StateExited ContainerState = "exited" // StateExited indicates that the container exited. + StateDead ContainerState = "dead" // StateDead indicates that the container failed to be deleted. Containers in this state are attempted to be cleaned up when the daemon restarts. +) + +var validStates = []ContainerState{ + StateCreated, StateRunning, StatePaused, StateRestarting, StateRemoving, StateExited, StateDead, +} + +// ValidateContainerState checks if the provided string is a valid +// container [ContainerState]. +func ValidateContainerState(s ContainerState) error { + switch s { + case StateCreated, StateRunning, StatePaused, StateRestarting, StateRemoving, StateExited, StateDead: + return nil + default: + return errInvalidParameter{error: fmt.Errorf("invalid value for state (%s): must be one of %s", s, strings.Join(validStates, ", "))} + } +} + +// StateStatus is used to return container wait results. +// Implements exec.ExitCode interface. +// This type is needed as State include a sync.Mutex field which make +// copying it unsafe. 
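// Reviewer note (not part of the diff): a minimal usage sketch for the new
// ValidateHealthStatus and ValidateContainerState helpers introduced above.
// Both accept plain strings because HealthStatus and ContainerState are
// currently type aliases for string.
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	// A known state validates cleanly.
	fmt.Println(container.ValidateContainerState(container.StateRunning)) // <nil>

	// An unknown state produces an invalid-parameter error listing the accepted values.
	fmt.Println(container.ValidateContainerState("sleeping"))

	// The same pattern applies to health statuses.
	fmt.Println(container.ValidateHealthStatus(container.Healthy)) // <nil>
}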
+type StateStatus struct { + exitCode int + err error +} + +// ExitCode returns current exitcode for the state. +func (s StateStatus) ExitCode() int { + return s.exitCode +} + +// Err returns current error for the state. Returns nil if the container had +// exited on its own. +func (s StateStatus) Err() error { + return s.err +} + +// NewStateStatus returns a new StateStatus with the given exit code and error. +func NewStateStatus(exitCode int, err error) StateStatus { + return StateStatus{ + exitCode: exitCode, + err: err, + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/image/disk_usage.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/image/disk_usage.go new file mode 100644 index 000000000..b29d925ca --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/image/disk_usage.go @@ -0,0 +1,8 @@ +package image + +// DiskUsage contains disk usage for images. +type DiskUsage struct { + TotalSize int64 + Reclaimable int64 + Items []*Summary +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/image/image_inspect.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/image/image_inspect.go index 78e81f052..3bdb47428 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/image/image_inspect.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/image/image_inspect.go @@ -3,6 +3,7 @@ package image import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/storage" + dockerspec "github.com/moby/docker-image-spec/specs-go/v1" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -84,7 +85,7 @@ type InspectResponse struct { // Author is the name of the author that was specified when committing the // image, or as specified through MAINTAINER (deprecated) in the Dockerfile. Author string - Config *container.Config + Config *dockerspec.DockerOCIImageConfig // Architecture is the hardware CPU architecture that the image runs on. Architecture string @@ -128,11 +129,12 @@ type InspectResponse struct { // compatibility. Descriptor *ocispec.Descriptor `json:"Descriptor,omitempty"` - // Manifests is a list of image manifests available in this image. It + // Manifests is a list of image manifests available in this image. It // provides a more detailed view of the platform-specific image manifests or // other image-attached data like build attestations. // - // Only available if the daemon provides a multi-platform image store. + // Only available if the daemon provides a multi-platform image store, the client + // requests manifests AND does not request a specific platform. // // WARNING: This is experimental and may change at any time without any backward // compatibility. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/image/opts.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/image/opts.go index 919510fe3..fd038557c 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/image/opts.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/image/opts.go @@ -83,6 +83,7 @@ type ListOptions struct { // RemoveOptions holds parameters to remove images. type RemoveOptions struct { + Platforms []ocispec.Platform Force bool PruneChildren bool } @@ -106,6 +107,11 @@ type LoadOptions struct { type InspectOptions struct { // Manifests returns the image manifests. Manifests bool + + // Platform selects the specific platform of a multi-platform image to inspect. 
+ // + // This option is only available for API version 1.49 and up. + Platform *ocispec.Platform } // SaveOptions holds parameters to save images. diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/registry/registry.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/registry/registry.go index b0a4d604f..14c82aaa6 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -1,3 +1,6 @@ +// FIXME(thaJeztah): remove once we are a module; the go:build directive prevents go from downgrading language version to go1.16: +//go:build go1.23 + package registry // import "github.com/docker/docker/api/types/registry" import ( @@ -15,23 +18,26 @@ type ServiceConfig struct { InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` Mirrors []string + + // ExtraFields is for internal use to include deprecated fields on older API versions. + ExtraFields map[string]any `json:"-"` } // MarshalJSON implements a custom marshaler to include legacy fields // in API responses. -func (sc ServiceConfig) MarshalJSON() ([]byte, error) { - tmp := map[string]interface{}{ - "InsecureRegistryCIDRs": sc.InsecureRegistryCIDRs, - "IndexConfigs": sc.IndexConfigs, - "Mirrors": sc.Mirrors, +func (sc *ServiceConfig) MarshalJSON() ([]byte, error) { + type tmp ServiceConfig + base, err := json.Marshal((*tmp)(sc)) + if err != nil { + return nil, err } - if sc.AllowNondistributableArtifactsCIDRs != nil { - tmp["AllowNondistributableArtifactsCIDRs"] = nil + var merged map[string]any + _ = json.Unmarshal(base, &merged) + + for k, v := range sc.ExtraFields { + merged[k] = v } - if sc.AllowNondistributableArtifactsHostnames != nil { - tmp["AllowNondistributableArtifactsHostnames"] = nil - } - return json.Marshal(tmp) + return json.Marshal(merged) } // NetIPNet is the net.IPNet type, which can be marshalled and @@ -49,15 +55,17 @@ func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { } // UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) error { var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = NetIPNet(*cidr) - } + if err := json.Unmarshal(b, &ipnetStr); err != nil { + return err } - return + _, cidr, err := net.ParseCIDR(ipnetStr) + if err != nil { + return err + } + *ipnet = NetIPNet(*cidr) + return nil } // IndexInfo contains information about a registry diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/config.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/config.go index 16202ccce..bdec82ffb 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/config.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/config.go @@ -1,6 +1,10 @@ package swarm // import "github.com/docker/docker/api/types/swarm" -import "os" +import ( + "os" + + "github.com/docker/docker/api/types/filters" +) // Config represents a config. type Config struct { @@ -12,6 +16,12 @@ type Config struct { // ConfigSpec represents a config specification from a config in swarm type ConfigSpec struct { Annotations + + // Data is the data to store as a config. + // + // The maximum allowed size is 1000KB, as defined in [MaxConfigSize]. 
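// Reviewer note (not part of the diff): a small sketch of the rewritten
// ServiceConfig.MarshalJSON above. ExtraFields entries are merged into the
// JSON output as top-level keys; the daemon uses this internally to keep
// emitting deprecated fields on older API versions. The key name and mirror
// URL below are purely illustrative.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/registry"
)

func main() {
	sc := &registry.ServiceConfig{
		Mirrors:     []string{"https://mirror.example.com"},
		ExtraFields: map[string]any{"AllowNondistributableArtifactsCIDRs": nil},
	}
	// MarshalJSON now has a pointer receiver, so marshal the pointer.
	out, err := json.Marshal(sc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // Mirrors plus the merged extra key
}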
+ // + // [MaxConfigSize]: https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/manager/controlapi#MaxConfigSize Data []byte `json:",omitempty"` // Templating controls whether and how to evaluate the config payload as @@ -38,3 +48,15 @@ type ConfigReference struct { ConfigID string ConfigName string } + +// ConfigCreateResponse contains the information returned to a client +// on the creation of a new config. +type ConfigCreateResponse struct { + // ID is the id of the created config. + ID string +} + +// ConfigListOptions holds parameters to list configs +type ConfigListOptions struct { + Filters filters.Args +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/node.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/node.go index bb98d5eed..f175b1b2c 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/node.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -1,4 +1,5 @@ package swarm // import "github.com/docker/docker/api/types/swarm" +import "github.com/docker/docker/api/types/filters" // Node represents a node. type Node struct { @@ -137,3 +138,13 @@ const ( type Topology struct { Segments map[string]string `json:",omitempty"` } + +// NodeListOptions holds parameters to list nodes with. +type NodeListOptions struct { + Filters filters.Args +} + +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/secret.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/secret.go index d5213ec98..248cffab8 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -1,6 +1,10 @@ package swarm // import "github.com/docker/docker/api/types/swarm" -import "os" +import ( + "os" + + "github.com/docker/docker/api/types/filters" +) // Secret represents a secret. type Secret struct { @@ -12,8 +16,22 @@ type Secret struct { // SecretSpec represents a secret specification from a secret in swarm type SecretSpec struct { Annotations - Data []byte `json:",omitempty"` - Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store + + // Data is the data to store as a secret. It must be empty if a + // [Driver] is used, in which case the data is loaded from an external + // secret store. The maximum allowed size is 500KB, as defined in + // [MaxSecretSize]. + // + // This field is only used to create the secret, and is not returned + // by other endpoints. + // + // [MaxSecretSize]: https://pkg.go.dev/github.com/moby/swarmkit/v2@v2.0.0-20250103191802-8c1959736554/api/validation#MaxSecretSize + Data []byte `json:",omitempty"` + + // Driver is the name of the secrets driver used to fetch the secret's + // value from an external secret store. If not set, the default built-in + // store is used. + Driver *Driver `json:",omitempty"` // Templating controls whether and how to evaluate the secret payload as // a template. If it is not set, no templating is used. @@ -34,3 +52,15 @@ type SecretReference struct { SecretID string SecretName string } + +// SecretCreateResponse contains the information returned to a client +// on the creation of a new secret. +type SecretCreateResponse struct { + // ID is the id of the created secret. 
+ ID string +} + +// SecretListOptions holds parameters to list secrets +type SecretListOptions struct { + Filters filters.Args +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/service.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/service.go index 5b6d5ec12..1d0a9a47b 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/service.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -1,6 +1,10 @@ package swarm // import "github.com/docker/docker/api/types/swarm" -import "time" +import ( + "time" + + "github.com/docker/docker/api/types/filters" +) // Service represents a service. type Service struct { @@ -200,3 +204,69 @@ type JobStatus struct { // Swarm manager. LastExecution time.Time `json:",omitempty"` } + +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// Values for RegistryAuthFrom in ServiceUpdateOptions +const ( + RegistryAuthFromSpec = "spec" + RegistryAuthFromPreviousSpec = "previous-spec" +) + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. + + // RegistryAuthFrom specifies where to find the registry authorization + // credentials if they are not given in EncodedRegistryAuth. Valid + // values are "spec" and "previous-spec". + RegistryAuthFrom string + + // Rollback indicates whether a server-side rollback should be + // performed. When this is set, the provided spec will be ignored. + // The valid values are "previous" and "none". An empty value is the + // same as "none". + Rollback string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceListOptions holds parameters to list services with. +type ServiceListOptions struct { + Filters filters.Args + + // Status indicates whether the server should include the service task + // count of running and desired tasks. + Status bool +} + +// ServiceInspectOptions holds parameters related to the "service inspect" +// operation. 
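// Reviewer note (not part of the diff): a hedged sketch of creating a secret
// with the documented constraints above (Data must stay under MaxSecretSize
// and is never returned by read endpoints). The matching client-side change,
// SecretCreate returning swarm.SecretCreateResponse, appears near the end of
// this diff. The secret name and payload are hypothetical.
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	spec := swarm.SecretSpec{
		Annotations: swarm.Annotations{Name: "db-password"},
		Data:        []byte("s3cret"), // must stay under 500KB
	}
	resp, err := cli.SecretCreate(context.Background(), spec)
	if err != nil {
		panic(err)
	}
	fmt.Println("created secret:", resp.ID)
}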
+type ServiceInspectOptions struct { + InsertDefaults bool +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/swarm.go index 1b4be6fff..2711c9eac 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -235,3 +235,10 @@ type UpdateFlags struct { RotateManagerToken bool RotateManagerUnlockKey bool } + +// UnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +type UnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/task.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/task.go index ad3eeca0b..722d1ceb6 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -3,6 +3,7 @@ package swarm // import "github.com/docker/docker/api/types/swarm" import ( "time" + "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm/runtime" ) @@ -223,3 +224,8 @@ type VolumeAttachment struct { // in the ContainerSpec, that this volume fulfills. Target string `json:",omitempty"` } + +// TaskListOptions holds parameters to list tasks with. +type TaskListOptions struct { + Filters filters.Args +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/system/disk_usage.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/system/disk_usage.go new file mode 100644 index 000000000..99078cf19 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/system/disk_usage.go @@ -0,0 +1,17 @@ +package system + +import ( + "github.com/docker/docker/api/types/build" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/volume" +) + +// DiskUsage contains response of Engine API for API 1.49 and greater: +// GET "/system/df" +type DiskUsage struct { + Images *image.DiskUsage + Containers *container.DiskUsage + Volumes *volume.DiskUsage + BuildCache *build.CacheDiskUsage +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/system/info.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/system/info.go index 8a2444da2..047639ed9 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/system/info.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/system/info.go @@ -29,8 +29,6 @@ type Info struct { CPUSet bool PidsLimit bool IPv4Forwarding bool - BridgeNfIptables bool `json:"BridgeNfIptables"` // Deprecated: netfilter module is now loaded on-demand and no longer during daemon startup, making this field obsolete. This field is always false and will be removed in the next release. - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` // Deprecated: netfilter module is now loaded on-demand and no longer during daemon startup, making this field obsolete. This field is always false and will be removed in the next release. 
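// Reviewer note (not part of the diff): the new system.DiskUsage type above
// composes the per-package DiskUsage types added elsewhere in this diff
// (container, image, volume, build). A sketch that only builds the value
// locally; it does not assume a dedicated client helper for the API 1.49
// GET /system/df response.
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/image"
	"github.com/docker/docker/api/types/system"
)

func main() {
	du := system.DiskUsage{
		Images:     &image.DiskUsage{TotalSize: 1 << 30, Reclaimable: 1 << 29},
		Containers: &container.DiskUsage{TotalSize: 1 << 20},
		// Volumes (*volume.DiskUsage) and BuildCache (*build.CacheDiskUsage) omitted here.
	}
	fmt.Printf("images: %d bytes total, %d reclaimable\n", du.Images.TotalSize, du.Images.Reclaimable)
}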
Debug bool NFd int OomKillDisable bool @@ -73,7 +71,9 @@ type Info struct { SecurityOptions []string ProductLicense string `json:",omitempty"` DefaultAddressPools []NetworkAddressPool `json:",omitempty"` + FirewallBackend *FirewallInfo `json:"FirewallBackend,omitempty"` CDISpecDirs []string + DiscoveredDevices []DeviceInfo `json:",omitempty"` Containerd *ContainerdInfo `json:",omitempty"` @@ -143,7 +143,7 @@ type Commit struct { // Expected is the commit ID of external tool expected by dockerd as set at build time. // // Deprecated: this field is no longer used in API v1.49, but kept for backward-compatibility with older API versions. - Expected string + Expected string `json:",omitempty"` } // NetworkAddressPool is a temp struct used by [Info] struct. @@ -151,3 +151,20 @@ type NetworkAddressPool struct { Base string Size int } + +// FirewallInfo describes the firewall backend. +type FirewallInfo struct { + // Driver is the name of the firewall backend driver. + Driver string `json:"Driver"` + // Info is a list of label/value pairs, containing information related to the firewall. + Info [][2]string `json:"Info,omitempty"` +} + +// DeviceInfo represents a discoverable device from a device driver. +type DeviceInfo struct { + // Source indicates the origin device driver. + Source string `json:"Source"` + // ID is the unique identifier for the device. + // Example: CDI FQDN like "vendor.com/gpu=0", or other driver-specific device ID + ID string `json:"ID"` +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/time/timestamp.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/time/timestamp.go index cab5c32e3..edd1d6ecb 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/time/timestamp.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/time/timestamp.go @@ -30,7 +30,7 @@ func GetTimestamp(value string, reference time.Time) (string, error) { var format string // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation - parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) + parseInLocation := !strings.ContainsAny(value, "zZ+") && strings.Count(value, "-") != 3 if strings.Contains(value, ".") { if parseInLocation { @@ -105,23 +105,23 @@ func GetTimestamp(value string, reference time.Time) (string, error) { // since := time.Unix(seconds, nanoseconds) // // returns seconds as defaultSeconds if value == "" -func ParseTimestamps(value string, defaultSeconds int64) (seconds int64, nanoseconds int64, err error) { +func ParseTimestamps(value string, defaultSeconds int64) (seconds int64, nanoseconds int64, _ error) { if value == "" { return defaultSeconds, 0, nil } return parseTimestamp(value) } -func parseTimestamp(value string) (sec int64, nsec int64, err error) { +func parseTimestamp(value string) (seconds int64, nanoseconds int64, _ error) { s, n, ok := strings.Cut(value, ".") - sec, err = strconv.ParseInt(s, 10, 64) + sec, err := strconv.ParseInt(s, 10, 64) if err != nil { return sec, 0, err } if !ok { return sec, 0, nil } - nsec, err = strconv.ParseInt(n, 10, 64) + nsec, err := strconv.ParseInt(n, 10, 64) if err != nil { return sec, nsec, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/types.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/types.go index 82ae339c3..443cf82a9 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/types.go +++ 
b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/types.go @@ -1,10 +1,8 @@ package types // import "github.com/docker/docker/api/types" import ( - "time" - + "github.com/docker/docker/api/types/build" "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/volume" @@ -24,7 +22,7 @@ type Ping struct { APIVersion string OSType string Experimental bool - BuilderVersion BuilderVersion + BuilderVersion build.BuilderVersion // SwarmStatus provides information about the current swarm status of the // engine, obtained from the "Swarm" header in the API response. @@ -91,41 +89,10 @@ type DiskUsage struct { Images []*image.Summary Containers []*container.Summary Volumes []*volume.Volume - BuildCache []*BuildCache + BuildCache []*build.CacheRecord BuilderSize int64 `json:",omitempty"` // Deprecated: deprecated in API 1.38, and no longer used since API 1.40. } -// BuildCachePruneReport contains the response for Engine API: -// POST "/build/prune" -type BuildCachePruneReport struct { - CachesDeleted []string - SpaceReclaimed uint64 -} - -// SecretCreateResponse contains the information returned to a client -// on the creation of a new secret. -type SecretCreateResponse struct { - // ID is the id of the created secret. - ID string -} - -// SecretListOptions holds parameters to list secrets -type SecretListOptions struct { - Filters filters.Args -} - -// ConfigCreateResponse contains the information returned to a client -// on the creation of a new config. -type ConfigCreateResponse struct { - // ID is the id of the created config. - ID string -} - -// ConfigListOptions holds parameters to list configs -type ConfigListOptions struct { - Filters filters.Args -} - // PushResult contains the tag, manifest digest, and manifest size from the // push. It's used to signal this information to the trust code in the client // so it can sign the manifest if necessary. @@ -134,46 +101,3 @@ type PushResult struct { Digest string Size int } - -// BuildResult contains the image id of a successful build -type BuildResult struct { - ID string -} - -// BuildCache contains information about a build cache record. -type BuildCache struct { - // ID is the unique ID of the build cache record. - ID string - // Parent is the ID of the parent build cache record. - // - // Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead. - Parent string `json:"Parent,omitempty"` - // Parents is the list of parent build cache record IDs. - Parents []string `json:" Parents,omitempty"` - // Type is the cache record type. - Type string - // Description is a description of the build-step that produced the build cache. - Description string - // InUse indicates if the build cache is in use. - InUse bool - // Shared indicates if the build cache is shared. - Shared bool - // Size is the amount of disk space used by the build cache (in bytes). - Size int64 - // CreatedAt is the date and time at which the build cache was created. - CreatedAt time.Time - // LastUsedAt is the date and time at which the build cache was last used. - LastUsedAt *time.Time - UsageCount int -} - -// BuildCachePruneOptions hold parameters to prune the build cache -type BuildCachePruneOptions struct { - All bool - ReservedSpace int64 - MaxUsedSpace int64 - MinFreeSpace int64 - Filters filters.Args - - KeepStorage int64 // Deprecated: deprecated in API 1.48. 
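// Reviewer note (not part of the diff): the prune options and report removed
// above now live in the build package (the aliases in types_deprecated.go and
// the new client/build_prune.go signature follow below). A minimal sketch,
// assuming build.CachePruneOptions keeps the same field names as the removed
// struct (All, ReservedSpace, MaxUsedSpace, MinFreeSpace, Filters).
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/build"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	report, err := cli.BuildCachePrune(context.Background(), build.CachePruneOptions{
		All:           true,
		ReservedSpace: 512 * 1024 * 1024, // keep up to 512MB of cache
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("deleted %d records, reclaimed %d bytes\n", len(report.CachesDeleted), report.SpaceReclaimed)
}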
-} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/types_deprecated.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/types_deprecated.go index 93e4336ad..8456a4560 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/types_deprecated.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/types_deprecated.go @@ -3,10 +3,12 @@ package types import ( "context" + "github.com/docker/docker/api/types/build" "github.com/docker/docker/api/types/common" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/storage" + "github.com/docker/docker/api/types/swarm" ) // IDResponse Response to an API call that returns just an Id. @@ -113,3 +115,127 @@ type ImageInspect = image.InspectResponse // // Deprecated: moved to [github.com/docker/docker/api/types/registry.RequestAuthConfig]. type RequestPrivilegeFunc func(context.Context) (string, error) + +// SecretCreateResponse contains the information returned to a client +// on the creation of a new secret. +// +// Deprecated: use [swarm.SecretCreateResponse]. +type SecretCreateResponse = swarm.SecretCreateResponse + +// SecretListOptions holds parameters to list secrets +// +// Deprecated: use [swarm.SecretListOptions]. +type SecretListOptions = swarm.SecretListOptions + +// ConfigCreateResponse contains the information returned to a client +// on the creation of a new config. +// +// Deprecated: use [swarm.ConfigCreateResponse]. +type ConfigCreateResponse = swarm.ConfigCreateResponse + +// ConfigListOptions holds parameters to list configs +// +// Deprecated: use [swarm.ConfigListOptions]. +type ConfigListOptions = swarm.ConfigListOptions + +// NodeListOptions holds parameters to list nodes with. +// +// Deprecated: use [swarm.NodeListOptions]. +type NodeListOptions = swarm.NodeListOptions + +// NodeRemoveOptions holds parameters to remove nodes with. +// +// Deprecated: use [swarm.NodeRemoveOptions]. +type NodeRemoveOptions = swarm.NodeRemoveOptions + +// TaskListOptions holds parameters to list tasks with. +// +// Deprecated: use [swarm.TaskListOptions]. +type TaskListOptions = swarm.TaskListOptions + +// ServiceCreateOptions contains the options to use when creating a service. +// +// Deprecated: use [swarm.ServiceCreateOptions]. +type ServiceCreateOptions = swarm.ServiceCreateOptions + +// ServiceUpdateOptions contains the options to be used for updating services. +// +// Deprecated: use [swarm.ServiceCreateOptions]. +type ServiceUpdateOptions = swarm.ServiceUpdateOptions + +const ( + RegistryAuthFromSpec = swarm.RegistryAuthFromSpec // Deprecated: use [swarm.RegistryAuthFromSpec]. + RegistryAuthFromPreviousSpec = swarm.RegistryAuthFromPreviousSpec // Deprecated: use [swarm.RegistryAuthFromPreviousSpec]. +) + +// ServiceListOptions holds parameters to list services with. +// +// Deprecated: use [swarm.ServiceListOptions]. +type ServiceListOptions = swarm.ServiceListOptions + +// ServiceInspectOptions holds parameters related to the "service inspect" +// operation. +// +// Deprecated: use [swarm.ServiceInspectOptions]. +type ServiceInspectOptions = swarm.ServiceInspectOptions + +// SwarmUnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +// +// Deprecated: use [swarm.UnlockKeyResponse]. +type SwarmUnlockKeyResponse = swarm.UnlockKeyResponse + +// BuildCache contains information about a build cache record. +// +// Deprecated: deprecated in API 1.49. Use [build.CacheRecord] instead. 
+type BuildCache = build.CacheRecord + +// BuildCachePruneOptions hold parameters to prune the build cache +// +// Deprecated: use [build.CachePruneOptions]. +type BuildCachePruneOptions = build.CachePruneOptions + +// BuildCachePruneReport contains the response for Engine API: +// POST "/build/prune" +// +// Deprecated: use [build.CachePruneReport]. +type BuildCachePruneReport = build.CachePruneReport + +// BuildResult contains the image id of a successful build/ +// +// Deprecated: use [build.Result]. +type BuildResult = build.Result + +// ImageBuildOptions holds the information +// necessary to build images. +// +// Deprecated: use [build.ImageBuildOptions]. +type ImageBuildOptions = build.ImageBuildOptions + +// ImageBuildOutput defines configuration for exporting a build result +// +// Deprecated: use [build.ImageBuildOutput]. +type ImageBuildOutput = build.ImageBuildOutput + +// ImageBuildResponse holds information +// returned by a server after building +// an image. +// +// Deprecated: use [build.ImageBuildResponse]. +type ImageBuildResponse = build.ImageBuildResponse + +// BuilderVersion sets the version of underlying builder to use +// +// Deprecated: use [build.BuilderVersion]. +type BuilderVersion = build.BuilderVersion + +const ( + // BuilderV1 is the first generation builder in docker daemon + // + // Deprecated: use [build.BuilderV1]. + BuilderV1 = build.BuilderV1 + // BuilderBuildKit is builder based on moby/buildkit project + // + // Deprecated: use [build.BuilderBuildKit]. + BuilderBuildKit = build.BuilderBuildKit +) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/volume/disk_usage.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/volume/disk_usage.go new file mode 100644 index 000000000..3d716c6e0 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/api/types/volume/disk_usage.go @@ -0,0 +1,8 @@ +package volume + +// DiskUsage contains disk usage for volumes. 
+type DiskUsage struct { + TotalSize int64 + Reclaimable int64 + Items []*Volume +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/build_prune.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/build_prune.go index 92b47d183..db8fad55d 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/build_prune.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/build_prune.go @@ -6,13 +6,13 @@ import ( "net/url" "strconv" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/build" "github.com/docker/docker/api/types/filters" "github.com/pkg/errors" ) // BuildCachePrune requests the daemon to delete unused cache data -func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) { +func (cli *Client) BuildCachePrune(ctx context.Context, opts build.CachePruneOptions) (*build.CachePruneReport, error) { if err := cli.NewVersionError(ctx, "1.31", "build prune"); err != nil { return nil, err } @@ -47,7 +47,7 @@ func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePru return nil, err } - report := types.BuildCachePruneReport{} + report := build.CachePruneReport{} if err := json.NewDecoder(resp.Body).Decode(&report); err != nil { return nil, errors.Wrap(err, "error retrieving disk usage") } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/client_interfaces.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/client_interfaces.go index f70d8ffa0..fe4b1af9b 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/client_interfaces.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/client_interfaces.go @@ -7,6 +7,7 @@ import ( "net/http" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/build" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" @@ -109,8 +110,8 @@ type DistributionAPIClient interface { // ImageAPIClient defines API client methods for the images type ImageAPIClient interface { - ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) - BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) + ImageBuild(ctx context.Context, context io.Reader, options build.ImageBuildOptions) (build.ImageBuildResponse, error) + BuildCachePrune(ctx context.Context, opts build.CachePruneOptions) (*build.CachePruneReport, error) BuildCancel(ctx context.Context, id string) error ImageCreate(ctx context.Context, parentReference string, options image.CreateOptions) (io.ReadCloser, error) ImageImport(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) (io.ReadCloser, error) @@ -154,8 +155,8 @@ type NetworkAPIClient interface { // NodeAPIClient defines API client methods for the nodes type NodeAPIClient interface { NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) - NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) - NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error + NodeList(ctx context.Context, options swarm.NodeListOptions) ([]swarm.Node, error) + NodeRemove(ctx context.Context, nodeID string, options swarm.NodeRemoveOptions) error NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error } @@ 
-175,22 +176,22 @@ type PluginAPIClient interface { // ServiceAPIClient defines API client methods for the services type ServiceAPIClient interface { - ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) - ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) - ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options swarm.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) + ServiceInspectWithRaw(ctx context.Context, serviceID string, options swarm.ServiceInspectOptions) (swarm.Service, []byte, error) + ServiceList(ctx context.Context, options swarm.ServiceListOptions) ([]swarm.Service, error) ServiceRemove(ctx context.Context, serviceID string) error - ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options swarm.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) ServiceLogs(ctx context.Context, serviceID string, options container.LogsOptions) (io.ReadCloser, error) TaskLogs(ctx context.Context, taskID string, options container.LogsOptions) (io.ReadCloser, error) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) - TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) + TaskList(ctx context.Context, options swarm.TaskListOptions) ([]swarm.Task, error) } // SwarmAPIClient defines API client methods for the swarm type SwarmAPIClient interface { SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error - SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) + SwarmGetUnlockKey(ctx context.Context) (swarm.UnlockKeyResponse, error) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error SwarmLeave(ctx context.Context, force bool) error SwarmInspect(ctx context.Context) (swarm.Swarm, error) @@ -219,8 +220,8 @@ type VolumeAPIClient interface { // SecretAPIClient defines API client methods for secrets type SecretAPIClient interface { - SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) - SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) + SecretList(ctx context.Context, options swarm.SecretListOptions) ([]swarm.Secret, error) + SecretCreate(ctx context.Context, secret swarm.SecretSpec) (swarm.SecretCreateResponse, error) SecretRemove(ctx context.Context, id string) error SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error @@ -228,8 +229,8 @@ type SecretAPIClient interface { // ConfigAPIClient defines API client methods for configs type ConfigAPIClient interface { - ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) - ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) + ConfigList(ctx context.Context, options swarm.ConfigListOptions) ([]swarm.Config, error) + ConfigCreate(ctx context.Context, config swarm.ConfigSpec) 
(swarm.ConfigCreateResponse, error) ConfigRemove(ctx context.Context, id string) error ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_create.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_create.go index c7ea6d2eb..1fbfc21f9 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_create.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_create.go @@ -4,13 +4,12 @@ import ( "context" "encoding/json" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" ) // ConfigCreate creates a new config. -func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { - var response types.ConfigCreateResponse +func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (swarm.ConfigCreateResponse, error) { + var response swarm.ConfigCreateResponse if err := cli.NewVersionError(ctx, "1.30", "config create"); err != nil { return response, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_list.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_list.go index 7e4a8ea56..67779e98a 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_list.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/config_list.go @@ -5,13 +5,12 @@ import ( "encoding/json" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" ) // ConfigList returns the list of configs. 
-func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { +func (cli *Client) ConfigList(ctx context.Context, options swarm.ConfigListOptions) ([]swarm.Config, error) { if err := cli.NewVersionError(ctx, "1.30", "config list"); err != nil { return nil, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_commit.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_commit.go index 9b46a1f32..4838ac71d 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_commit.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_commit.go @@ -32,7 +32,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, containerID string, opti if tagged, ok := ref.(reference.Tagged); ok { tag = tagged.Tag() } - repository = reference.FamiliarName(ref) + repository = ref.Name() } query := url.Values{} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_create.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_create.go index 9b8616f76..9bb106f77 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_create.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/container_create.go @@ -3,6 +3,7 @@ package client // import "github.com/docker/docker/client" import ( "context" "encoding/json" + "errors" "net/url" "path" "sort" @@ -54,6 +55,19 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config // When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize hostConfig.ConsoleSize = [2]uint{0, 0} } + if versions.LessThan(cli.ClientVersion(), "1.44") { + for _, m := range hostConfig.Mounts { + if m.BindOptions != nil { + // ReadOnlyNonRecursive can be safely ignored when API < 1.44 + if m.BindOptions.ReadOnlyForceRecursive { + return response, errors.New("bind-recursive=readonly requires API v1.44 or later") + } + if m.BindOptions.NonRecursive && versions.LessThan(cli.ClientVersion(), "1.40") { + return response, errors.New("bind-recursive=disabled requires API v1.40 or later") + } + } + } + } hostConfig.CapAdd = normalizeCapabilities(hostConfig.CapAdd) hostConfig.CapDrop = normalizeCapabilities(hostConfig.CapDrop) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/errors.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/errors.go index 609f92ce6..7bd859314 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/errors.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/errors.go @@ -4,9 +4,11 @@ import ( "context" "errors" "fmt" + "net/http" + cerrdefs "github.com/containerd/errdefs" + "github.com/containerd/errdefs/pkg/errhttp" "github.com/docker/docker/api/types/versions" - "github.com/docker/docker/errdefs" ) // errConnectionFailed implements an error returned when connection failed. @@ -48,9 +50,11 @@ func connectionFailed(host string) error { } // IsErrNotFound returns true if the error is a NotFound error, which is returned -// by the API when some object is not found. It is an alias for [errdefs.IsNotFound]. +// by the API when some object is not found. It is an alias for [cerrdefs.IsNotFound]. +// +// Deprecated: use [cerrdefs.IsNotFound] instead. 
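// Reviewer note (not part of the diff): a sketch of the client-side guard added
// to ContainerCreate above. When the negotiated or pinned API version is below
// 1.44, a read-only recursive bind mount is rejected before any request is sent.
// The paths, image, and pinned version are illustrative only.
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/mount"
	"github.com/docker/docker/client"
)

func main() {
	// Pin an older API version to trigger the new check.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.43"))
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	hostConfig := &container.HostConfig{
		Mounts: []mount.Mount{{
			Type:        mount.TypeBind,
			Source:      "/srv/data",
			Target:      "/data",
			BindOptions: &mount.BindOptions{ReadOnlyForceRecursive: true},
		}},
	}
	_, err = cli.ContainerCreate(context.Background(), &container.Config{Image: "alpine"}, hostConfig, nil, nil, "demo")
	fmt.Println(err) // expected: bind-recursive=readonly requires API v1.44 or later
}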
func IsErrNotFound(err error) bool { - return errdefs.IsNotFound(err) + return cerrdefs.IsNotFound(err) } type objectNotFoundError struct { @@ -83,3 +87,43 @@ func (cli *Client) NewVersionError(ctx context.Context, APIrequired, feature str } return nil } + +type httpError struct { + err error + errdef error +} + +func (e *httpError) Error() string { + return e.err.Error() +} + +func (e *httpError) Unwrap() error { + return e.err +} + +func (e *httpError) Is(target error) bool { + return errors.Is(e.errdef, target) +} + +// httpErrorFromStatusCode creates an errdef error, based on the provided HTTP status-code +func httpErrorFromStatusCode(err error, statusCode int) error { + if err == nil { + return nil + } + base := errhttp.ToNative(statusCode) + if base != nil { + return &httpError{err: err, errdef: base} + } + + switch { + case statusCode >= http.StatusOK && statusCode < http.StatusBadRequest: + // it's a client error + return err + case statusCode >= http.StatusBadRequest && statusCode < http.StatusInternalServerError: + return &httpError{err: err, errdef: cerrdefs.ErrInvalidArgument} + case statusCode >= http.StatusInternalServerError && statusCode < 600: + return &httpError{err: err, errdef: cerrdefs.ErrInternal} + default: + return &httpError{err: err, errdef: cerrdefs.ErrUnknown} + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_build.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_build.go index 6e2a40687..28b74a3f1 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_build.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_build.go @@ -10,7 +10,7 @@ import ( "strconv" "strings" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/build" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" ) @@ -18,15 +18,15 @@ import ( // ImageBuild sends a request to the daemon to build images. // The Body in the response implements an io.ReadCloser and it's up to the caller to // close it. 
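// Reviewer note (not part of the diff): with errors now classified through
// containerd's errdefs (see httpErrorFromStatusCode above), callers can test
// API errors with cerrdefs helpers instead of the deprecated
// client.IsErrNotFound. The container ID below is hypothetical.
package main

import (
	"context"
	"fmt"

	cerrdefs "github.com/containerd/errdefs"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	_, err = cli.ContainerInspect(context.Background(), "no-such-container")
	if cerrdefs.IsNotFound(err) {
		fmt.Println("container does not exist")
	}
}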
-func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { +func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options build.ImageBuildOptions) (build.ImageBuildResponse, error) { query, err := cli.imageBuildOptionsToQuery(ctx, options) if err != nil { - return types.ImageBuildResponse{}, err + return build.ImageBuildResponse{}, err } buf, err := json.Marshal(options.AuthConfigs) if err != nil { - return types.ImageBuildResponse{}, err + return build.ImageBuildResponse{}, err } headers := http.Header{} @@ -35,16 +35,16 @@ func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, optio resp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) if err != nil { - return types.ImageBuildResponse{}, err + return build.ImageBuildResponse{}, err } - return types.ImageBuildResponse{ + return build.ImageBuildResponse{ Body: resp.Body, OSType: getDockerOS(resp.Header.Get("Server")), }, nil } -func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options types.ImageBuildOptions) (url.Values, error) { +func (cli *Client) imageBuildOptionsToQuery(ctx context.Context, options build.ImageBuildOptions) (url.Values, error) { query := url.Values{} if len(options.Tags) > 0 { query["t"] = options.Tags diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_create.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_create.go index 0357051e7..1aa061eb0 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_create.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_create.go @@ -21,7 +21,7 @@ func (cli *Client) ImageCreate(ctx context.Context, parentReference string, opti } query := url.Values{} - query.Set("fromImage", reference.FamiliarName(ref)) + query.Set("fromImage", ref.Name()) query.Set("tag", getAPITagFromNamedRef(ref)) if options.Platform != "" { query.Set("platform", strings.ToLower(options.Platform)) diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_inspect.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_inspect.go index 116119546..d88f0f141 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_inspect.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_inspect.go @@ -32,6 +32,17 @@ func (cli *Client) ImageInspect(ctx context.Context, imageID string, inspectOpts query.Set("manifests", "1") } + if opts.apiOptions.Platform != nil { + if err := cli.NewVersionError(ctx, "1.49", "platform"); err != nil { + return image.InspectResponse{}, err + } + platform, err := encodePlatform(opts.apiOptions.Platform) + if err != nil { + return image.InspectResponse{}, err + } + query.Set("platform", platform) + } + resp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil) defer ensureReaderClosed(resp) if err != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_inspect_opts.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_inspect_opts.go index 2607f3678..655cbf0b7 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_inspect_opts.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_inspect_opts.go @@ -4,6 +4,7 @@ import ( "bytes" "github.com/docker/docker/api/types/image" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) // ImageInspectOption is a type representing functional options for the image inspect 
operation. @@ -36,6 +37,17 @@ func ImageInspectWithManifests(manifests bool) ImageInspectOption { }) } +// ImageInspectWithPlatform sets platform API option for the image inspect operation. +// This option is only available for API version 1.49 and up. +// With this option set, the image inspect operation will return information for the +// specified platform variant of the multi-platform image. +func ImageInspectWithPlatform(platform *ocispec.Platform) ImageInspectOption { + return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error { + clientOpts.apiOptions.Platform = platform + return nil + }) +} + // ImageInspectWithAPIOpts sets the API options for the image inspect operation. func ImageInspectWithAPIOpts(opts image.InspectOptions) ImageInspectOption { return imageInspectOptionFunc(func(clientOpts *imageInspectOpts) error { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_pull.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_pull.go index 4286942b3..ae8c807f7 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_pull.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_pull.go @@ -6,9 +6,9 @@ import ( "net/url" "strings" + cerrdefs "github.com/containerd/errdefs" "github.com/distribution/reference" "github.com/docker/docker/api/types/image" - "github.com/docker/docker/errdefs" ) // ImagePull requests the docker host to pull an image from a remote registry. @@ -26,7 +26,7 @@ func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.P } query := url.Values{} - query.Set("fromImage", reference.FamiliarName(ref)) + query.Set("fromImage", ref.Name()) if !options.All { query.Set("tag", getAPITagFromNamedRef(ref)) } @@ -35,7 +35,7 @@ func (cli *Client) ImagePull(ctx context.Context, refStr string, options image.P } resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) if privilegeErr != nil { return nil, privilegeErr diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_push.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_push.go index b340bc4fb..521030440 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_push.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_push.go @@ -9,10 +9,10 @@ import ( "net/http" "net/url" + cerrdefs "github.com/containerd/errdefs" "github.com/distribution/reference" "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" ) // ImagePush requests the docker host to push an image to a remote registry. 
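// Reviewer note (not part of the diff): a sketch of the platform-aware image
// inspect added above. The option requires API 1.49 or later, which the new
// version check in ImageInspect enforces. Image name and platform are
// illustrative.
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	img, err := cli.ImageInspect(context.Background(), "alpine:latest",
		client.ImageInspectWithPlatform(&ocispec.Platform{OS: "linux", Architecture: "arm64"}))
	if err != nil {
		panic(err)
	}
	fmt.Println(img.Os, img.Architecture)
}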
@@ -29,7 +29,6 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu return nil, errors.New("cannot push a digest reference") } - name := reference.FamiliarName(ref) query := url.Values{} if !options.All { ref = reference.TagNameOnly(ref) @@ -52,13 +51,13 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options image.Pu query.Set("platform", string(pJson)) } - resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + resp, err := cli.tryImagePush(ctx, ref.Name(), query, options.RegistryAuth) + if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) if privilegeErr != nil { return nil, privilegeErr } - resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader) + resp, err = cli.tryImagePush(ctx, ref.Name(), query, newAuthHeader) } if err != nil { return nil, err diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_remove.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_remove.go index b0c87ca09..0d769139b 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_remove.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_remove.go @@ -19,6 +19,14 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options imag query.Set("noprune", "1") } + if len(options.Platforms) > 0 { + p, err := encodePlatforms(options.Platforms...) + if err != nil { + return nil, err + } + query["platforms"] = p + } + resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) defer ensureReaderClosed(resp) if err != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_search.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_search.go index 0a7b5ec22..f3aab43a8 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_search.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_search.go @@ -7,9 +7,9 @@ import ( "net/url" "strconv" + cerrdefs "github.com/containerd/errdefs" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" ) // ImageSearch makes the docker host search by a term in a remote registry. 
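// Reviewer note (not part of the diff): ImagePush above (and ImageCreate,
// ImageTag, ContainerCommit elsewhere in this diff) switch from
// reference.FamiliarName(ref) to ref.Name(), so the fully-qualified repository
// name is now sent in query parameters. A small sketch of the difference:
package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("alpine:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.FamiliarName(ref)) // "alpine"
	fmt.Println(ref.Name())                  // "docker.io/library/alpine"
}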
@@ -32,7 +32,7 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options registr resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) defer ensureReaderClosed(resp) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) if privilegeErr != nil { return results, privilegeErr diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_tag.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_tag.go index ea6b4a1e6..25c7360df 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_tag.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/image_tag.go @@ -26,7 +26,7 @@ func (cli *Client) ImageTag(ctx context.Context, source, target string) error { ref = reference.TagNameOnly(ref) query := url.Values{} - query.Set("repo", reference.FamiliarName(ref)) + query.Set("repo", ref.Name()) if tagged, ok := ref.(reference.Tagged); ok { query.Set("tag", tagged.Tag()) } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_list.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_list.go index 2534f4aee..429eec244 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_list.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_list.go @@ -5,13 +5,12 @@ import ( "encoding/json" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" ) // NodeList returns the list of nodes. -func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { +func (cli *Client) NodeList(ctx context.Context, options swarm.NodeListOptions) ([]swarm.Node, error) { query := url.Values{} if options.Filters.Len() > 0 { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_remove.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_remove.go index 81f8fed6b..07d8e6536 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_remove.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/node_remove.go @@ -4,11 +4,11 @@ import ( "context" "net/url" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" ) // NodeRemove removes a Node. 
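// Reviewer note (not part of the diff): NodeList above (and NodeRemove just
// below) now take their option types from the swarm package rather than the
// top-level types package. A minimal sketch, assuming a reachable swarm manager:
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	nodes, err := cli.NodeList(context.Background(), swarm.NodeListOptions{})
	if err != nil {
		panic(err)
	}
	for _, n := range nodes {
		fmt.Println(n.ID, n.Spec.Role)
	}
}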
-func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { +func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options swarm.NodeRemoveOptions) error { nodeID, err := trimID("node", nodeID) if err != nil { return err diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/ping.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/ping.go index c7645e56d..2ffa5945f 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/ping.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/ping.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/build" "github.com/docker/docker/api/types/swarm" ) @@ -67,7 +68,7 @@ func parsePingResponse(cli *Client, resp *http.Response) (types.Ping, error) { ping.Experimental = true } if bv := resp.Header.Get("Builder-Version"); bv != "" { - ping.BuilderVersion = types.BuilderVersion(bv) + ping.BuilderVersion = build.BuilderVersion(bv) } if si := resp.Header.Get("Swarm"); si != "" { state, role, _ := strings.Cut(si, "/") diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_install.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_install.go index b04dcf9a1..8553961ba 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_install.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/plugin_install.go @@ -7,15 +7,15 @@ import ( "net/http" "net/url" + cerrdefs "github.com/containerd/errdefs" "github.com/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" "github.com/pkg/errors" ) // PluginInstall installs a plugin -func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { +func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (_ io.ReadCloser, retErr error) { query := url.Values{} if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { return nil, errors.Wrap(err, "invalid remote reference") @@ -45,7 +45,7 @@ func (cli *Client) PluginInstall(ctx context.Context, name string, options types return } defer func() { - if err != nil { + if retErr != nil { delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) ensureReaderClosed(delResp) } @@ -82,7 +82,7 @@ func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileg func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) - if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + if cerrdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { // todo: do inspect before to check existing name before checking privileges newAuthHeader, privilegeErr := options.PrivilegeFunc(ctx) if privilegeErr != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/request.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/request.go index 2b913aab6..d9074a736 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/request.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/request.go @@ -15,7 +15,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/versions" - 
"github.com/docker/docker/errdefs" "github.com/pkg/errors" ) @@ -116,10 +115,8 @@ func (cli *Client) sendRequest(ctx context.Context, method, path string, query u resp, err := cli.doRequest(req) switch { - case errors.Is(err, context.Canceled): - return nil, errdefs.Cancelled(err) - case errors.Is(err, context.DeadlineExceeded): - return nil, errdefs.Deadline(err) + case errors.Is(err, context.Canceled), errors.Is(err, context.DeadlineExceeded): + return nil, err case err == nil: return resp, cli.checkResponseErr(resp) default: @@ -195,11 +192,11 @@ func (cli *Client) checkResponseErr(serverResp *http.Response) (retErr error) { if serverResp == nil { return nil } - if serverResp.StatusCode >= 200 && serverResp.StatusCode < 400 { + if serverResp.StatusCode >= http.StatusOK && serverResp.StatusCode < http.StatusBadRequest { return nil } defer func() { - retErr = errdefs.FromStatusCode(retErr, serverResp.StatusCode) + retErr = httpErrorFromStatusCode(retErr, serverResp.StatusCode) }() var body []byte @@ -237,7 +234,7 @@ func (cli *Client) checkResponseErr(serverResp *http.Response) (retErr error) { } var daemonErr error - if serverResp.Header.Get("Content-Type") == "application/json" && (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) { + if serverResp.Header.Get("Content-Type") == "application/json" { var errorResponse types.ErrorResponse if err := json.Unmarshal(body, &errorResponse); err != nil { return errors.Wrap(err, "Error reading JSON") diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_create.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_create.go index bbd119187..aee051b90 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_create.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_create.go @@ -4,22 +4,21 @@ import ( "context" "encoding/json" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" ) // SecretCreate creates a new secret. -func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { +func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (swarm.SecretCreateResponse, error) { if err := cli.NewVersionError(ctx, "1.25", "secret create"); err != nil { - return types.SecretCreateResponse{}, err + return swarm.SecretCreateResponse{}, err } resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) defer ensureReaderClosed(resp) if err != nil { - return types.SecretCreateResponse{}, err + return swarm.SecretCreateResponse{}, err } - var response types.SecretCreateResponse + var response swarm.SecretCreateResponse err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_list.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_list.go index e3b7dbdb9..b158d99ed 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_list.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/secret_list.go @@ -5,13 +5,12 @@ import ( "encoding/json" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" ) // SecretList returns the list of secrets. 
-func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { +func (cli *Client) SecretList(ctx context.Context, options swarm.SecretListOptions) ([]swarm.Secret, error) { if err := cli.NewVersionError(ctx, "1.25", "secret list"); err != nil { return nil, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_create.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_create.go index fb12dd5b5..6b9932ae2 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_create.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_create.go @@ -8,7 +8,6 @@ import ( "strings" "github.com/distribution/reference" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" @@ -17,7 +16,7 @@ import ( ) // ServiceCreate creates a new service. -func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) { +func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options swarm.ServiceCreateOptions) (swarm.ServiceCreateResponse, error) { var response swarm.ServiceCreateResponse // Make sure we negotiated (if the client is configured to do so), @@ -37,6 +36,11 @@ func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, if err := validateServiceSpec(service); err != nil { return response, err } + if versions.LessThan(cli.version, "1.30") { + if err := validateAPIVersion(service, cli.version); err != nil { + return response, err + } + } // ensure that the image is tagged var resolveWarning string @@ -191,3 +195,18 @@ func validateServiceSpec(s swarm.ServiceSpec) error { } return nil } + +func validateAPIVersion(c swarm.ServiceSpec, apiVersion string) error { + for _, m := range c.TaskTemplate.ContainerSpec.Mounts { + if m.BindOptions != nil { + if m.BindOptions.NonRecursive && versions.LessThan(apiVersion, "1.40") { + return errors.Errorf("bind-recursive=disabled requires API v1.40 or later") + } + // ReadOnlyNonRecursive can be safely ignored when API < 1.44 + if m.BindOptions.ReadOnlyForceRecursive && versions.LessThan(apiVersion, "1.44") { + return errors.Errorf("bind-recursive=readonly requires API v1.44 or later") + } + } + } + return nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_inspect.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_inspect.go index 77b4402d3..892e9004f 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_inspect.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_inspect.go @@ -8,12 +8,11 @@ import ( "io" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" ) // ServiceInspectWithRaw returns the service information and the raw data. 
-func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { +func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts swarm.ServiceInspectOptions) (swarm.Service, []byte, error) { serviceID, err := trimID("service", serviceID) if err != nil { return swarm.Service{}, nil, err diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_list.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_list.go index f589a8423..019873bb6 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_list.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_list.go @@ -5,13 +5,12 @@ import ( "encoding/json" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" ) // ServiceList returns the list of services. -func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { +func (cli *Client) ServiceList(ctx context.Context, options swarm.ServiceListOptions) ([]swarm.Service, error) { query := url.Values{} if options.Filters.Len() > 0 { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_update.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_update.go index ecb98f468..e0c1a2664 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_update.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/service_update.go @@ -6,7 +6,6 @@ import ( "net/http" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" @@ -15,7 +14,7 @@ import ( // ServiceUpdate updates a Service. The version number is required to avoid conflicting writes. // It should be the value as set *before* the update. You can find this value in the Meta field // of swarm.Service, which can be found using ServiceInspectWithRaw. -func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) { +func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options swarm.ServiceUpdateOptions) (swarm.ServiceUpdateResponse, error) { serviceID, err := trimID("service", serviceID) if err != nil { return swarm.ServiceUpdateResponse{}, err diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go index 271fc08c9..6e30daf61 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go @@ -4,18 +4,18 @@ import ( "context" "encoding/json" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" ) // SwarmGetUnlockKey retrieves the swarm's unlock key. 
-func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { +func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (swarm.UnlockKeyResponse, error) { resp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) defer ensureReaderClosed(resp) if err != nil { - return types.SwarmUnlockKeyResponse{}, err + return swarm.UnlockKeyResponse{}, err } - var response types.SwarmUnlockKeyResponse + var response swarm.UnlockKeyResponse err = json.NewDecoder(resp.Body).Decode(&response) return response, err } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/task_list.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/task_list.go index aba7f61e6..5d540c383 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/task_list.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/task_list.go @@ -5,13 +5,12 @@ import ( "encoding/json" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" ) // TaskList returns the list of tasks. -func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { +func (cli *Client) TaskList(ctx context.Context, options swarm.TaskListOptions) ([]swarm.Task, error) { query := url.Values{} if options.Filters.Len() > 0 { diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/utils.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/utils.go index 925d4d8d3..27f2b9884 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/client/utils.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/client/utils.go @@ -6,8 +6,8 @@ import ( "net/url" "strings" + cerrdefs "github.com/containerd/errdefs" "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/errdefs" "github.com/docker/docker/internal/lazyregexp" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -90,7 +90,7 @@ func encodePlatforms(platform ...ocispec.Platform) ([]string, error) { func encodePlatform(platform *ocispec.Platform) (string, error) { p, err := json.Marshal(platform) if err != nil { - return "", errdefs.InvalidParameter(fmt.Errorf("invalid platform: %v", err)) + return "", fmt.Errorf("%w: invalid platform: %v", cerrdefs.ErrInvalidArgument, err) } return string(p), nil } diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/defs.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/defs.go deleted file mode 100644 index a5523c3e9..000000000 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/defs.go +++ /dev/null @@ -1,69 +0,0 @@ -package errdefs - -// ErrNotFound signals that the requested object doesn't exist -type ErrNotFound interface { - NotFound() -} - -// ErrInvalidParameter signals that the user input is invalid -type ErrInvalidParameter interface { - InvalidParameter() -} - -// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. -// A change in state should be able to clear this error. -type ErrConflict interface { - Conflict() -} - -// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action -type ErrUnauthorized interface { - Unauthorized() -} - -// ErrUnavailable signals that the requested action/subsystem is not available. -type ErrUnavailable interface { - Unavailable() -} - -// ErrForbidden signals that the requested action cannot be performed under any circumstances. 
-// When a ErrForbidden is returned, the caller should never retry the action. -type ErrForbidden interface { - Forbidden() -} - -// ErrSystem signals that some internal error occurred. -// An example of this would be a failed mount request. -type ErrSystem interface { - System() -} - -// ErrNotModified signals that an action can't be performed because it's already in the desired state -type ErrNotModified interface { - NotModified() -} - -// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. -type ErrNotImplemented interface { - NotImplemented() -} - -// ErrUnknown signals that the kind of error that occurred is not known. -type ErrUnknown interface { - Unknown() -} - -// ErrCancelled signals that the action was cancelled. -type ErrCancelled interface { - Cancelled() -} - -// ErrDeadline signals that the deadline was reached before the action completed. -type ErrDeadline interface { - DeadlineExceeded() -} - -// ErrDataLoss indicates that data was lost or there is data corruption. -type ErrDataLoss interface { - DataLoss() -} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/doc.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/doc.go deleted file mode 100644 index c211f174f..000000000 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. -// Errors that cross the package boundary should implement one (and only one) of these interfaces. -// -// Packages should not reference these interfaces directly, only implement them. -// To check if a particular error implements one of these interfaces, there are helper -// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. -// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). -package errdefs // import "github.com/docker/docker/errdefs" diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/helpers.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/helpers.go deleted file mode 100644 index ab76e6273..000000000 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/helpers.go +++ /dev/null @@ -1,305 +0,0 @@ -package errdefs - -import "context" - -type errNotFound struct{ error } - -func (errNotFound) NotFound() {} - -func (e errNotFound) Cause() error { - return e.error -} - -func (e errNotFound) Unwrap() error { - return e.error -} - -// NotFound creates an [ErrNotFound] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrNotFound], -func NotFound(err error) error { - if err == nil || IsNotFound(err) { - return err - } - return errNotFound{err} -} - -type errInvalidParameter struct{ error } - -func (errInvalidParameter) InvalidParameter() {} - -func (e errInvalidParameter) Cause() error { - return e.error -} - -func (e errInvalidParameter) Unwrap() error { - return e.error -} - -// InvalidParameter creates an [ErrInvalidParameter] error from the given error. 
-// It returns the error as-is if it is either nil (no error) or already implements -// [ErrInvalidParameter], -func InvalidParameter(err error) error { - if err == nil || IsInvalidParameter(err) { - return err - } - return errInvalidParameter{err} -} - -type errConflict struct{ error } - -func (errConflict) Conflict() {} - -func (e errConflict) Cause() error { - return e.error -} - -func (e errConflict) Unwrap() error { - return e.error -} - -// Conflict creates an [ErrConflict] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrConflict], -func Conflict(err error) error { - if err == nil || IsConflict(err) { - return err - } - return errConflict{err} -} - -type errUnauthorized struct{ error } - -func (errUnauthorized) Unauthorized() {} - -func (e errUnauthorized) Cause() error { - return e.error -} - -func (e errUnauthorized) Unwrap() error { - return e.error -} - -// Unauthorized creates an [ErrUnauthorized] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrUnauthorized], -func Unauthorized(err error) error { - if err == nil || IsUnauthorized(err) { - return err - } - return errUnauthorized{err} -} - -type errUnavailable struct{ error } - -func (errUnavailable) Unavailable() {} - -func (e errUnavailable) Cause() error { - return e.error -} - -func (e errUnavailable) Unwrap() error { - return e.error -} - -// Unavailable creates an [ErrUnavailable] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrUnavailable], -func Unavailable(err error) error { - if err == nil || IsUnavailable(err) { - return err - } - return errUnavailable{err} -} - -type errForbidden struct{ error } - -func (errForbidden) Forbidden() {} - -func (e errForbidden) Cause() error { - return e.error -} - -func (e errForbidden) Unwrap() error { - return e.error -} - -// Forbidden creates an [ErrForbidden] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrForbidden], -func Forbidden(err error) error { - if err == nil || IsForbidden(err) { - return err - } - return errForbidden{err} -} - -type errSystem struct{ error } - -func (errSystem) System() {} - -func (e errSystem) Cause() error { - return e.error -} - -func (e errSystem) Unwrap() error { - return e.error -} - -// System creates an [ErrSystem] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrSystem], -func System(err error) error { - if err == nil || IsSystem(err) { - return err - } - return errSystem{err} -} - -type errNotModified struct{ error } - -func (errNotModified) NotModified() {} - -func (e errNotModified) Cause() error { - return e.error -} - -func (e errNotModified) Unwrap() error { - return e.error -} - -// NotModified creates an [ErrNotModified] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [NotModified], -func NotModified(err error) error { - if err == nil || IsNotModified(err) { - return err - } - return errNotModified{err} -} - -type errNotImplemented struct{ error } - -func (errNotImplemented) NotImplemented() {} - -func (e errNotImplemented) Cause() error { - return e.error -} - -func (e errNotImplemented) Unwrap() error { - return e.error -} - -// NotImplemented creates an [ErrNotImplemented] error from the given error. 
-// It returns the error as-is if it is either nil (no error) or already implements -// [ErrNotImplemented], -func NotImplemented(err error) error { - if err == nil || IsNotImplemented(err) { - return err - } - return errNotImplemented{err} -} - -type errUnknown struct{ error } - -func (errUnknown) Unknown() {} - -func (e errUnknown) Cause() error { - return e.error -} - -func (e errUnknown) Unwrap() error { - return e.error -} - -// Unknown creates an [ErrUnknown] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrUnknown], -func Unknown(err error) error { - if err == nil || IsUnknown(err) { - return err - } - return errUnknown{err} -} - -type errCancelled struct{ error } - -func (errCancelled) Cancelled() {} - -func (e errCancelled) Cause() error { - return e.error -} - -func (e errCancelled) Unwrap() error { - return e.error -} - -// Cancelled creates an [ErrCancelled] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrCancelled], -func Cancelled(err error) error { - if err == nil || IsCancelled(err) { - return err - } - return errCancelled{err} -} - -type errDeadline struct{ error } - -func (errDeadline) DeadlineExceeded() {} - -func (e errDeadline) Cause() error { - return e.error -} - -func (e errDeadline) Unwrap() error { - return e.error -} - -// Deadline creates an [ErrDeadline] error from the given error. -// It returns the error as-is if it is either nil (no error) or already implements -// [ErrDeadline], -func Deadline(err error) error { - if err == nil || IsDeadline(err) { - return err - } - return errDeadline{err} -} - -type errDataLoss struct{ error } - -func (errDataLoss) DataLoss() {} - -func (e errDataLoss) Cause() error { - return e.error -} - -func (e errDataLoss) Unwrap() error { - return e.error -} - -// DataLoss creates an [ErrDataLoss] error from the given error. 
-// It returns the error as-is if it is either nil (no error) or already implements -// [ErrDataLoss], -func DataLoss(err error) error { - if err == nil || IsDataLoss(err) { - return err - } - return errDataLoss{err} -} - -// FromContext returns the error class from the passed in context -func FromContext(ctx context.Context) error { - e := ctx.Err() - if e == nil { - return nil - } - - if e == context.Canceled { - return Cancelled(e) - } - if e == context.DeadlineExceeded { - return Deadline(e) - } - return Unknown(e) -} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/http_helpers.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/http_helpers.go deleted file mode 100644 index 0a8fadd48..000000000 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/http_helpers.go +++ /dev/null @@ -1,47 +0,0 @@ -package errdefs - -import ( - "net/http" -) - -// FromStatusCode creates an errdef error, based on the provided HTTP status-code -func FromStatusCode(err error, statusCode int) error { - if err == nil { - return nil - } - switch statusCode { - case http.StatusNotFound: - return NotFound(err) - case http.StatusBadRequest: - return InvalidParameter(err) - case http.StatusConflict: - return Conflict(err) - case http.StatusUnauthorized: - return Unauthorized(err) - case http.StatusServiceUnavailable: - return Unavailable(err) - case http.StatusForbidden: - return Forbidden(err) - case http.StatusNotModified: - return NotModified(err) - case http.StatusNotImplemented: - return NotImplemented(err) - case http.StatusInternalServerError: - if IsCancelled(err) || IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) { - return err - } - return System(err) - default: - switch { - case statusCode >= 200 && statusCode < 400: - // it's a client error - return err - case statusCode >= 400 && statusCode < 500: - return InvalidParameter(err) - case statusCode >= 500 && statusCode < 600: - return System(err) - default: - return Unknown(err) - } - } -} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/is.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/is.go deleted file mode 100644 index 30ea7e6fe..000000000 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/errdefs/is.go +++ /dev/null @@ -1,123 +0,0 @@ -package errdefs - -import ( - "context" - "errors" -) - -type causer interface { - Cause() error -} - -type wrapErr interface { - Unwrap() error -} - -func getImplementer(err error) error { - switch e := err.(type) { - case - ErrNotFound, - ErrInvalidParameter, - ErrConflict, - ErrUnauthorized, - ErrUnavailable, - ErrForbidden, - ErrSystem, - ErrNotModified, - ErrNotImplemented, - ErrCancelled, - ErrDeadline, - ErrDataLoss, - ErrUnknown: - return err - case causer: - return getImplementer(e.Cause()) - case wrapErr: - return getImplementer(e.Unwrap()) - default: - return err - } -} - -// IsNotFound returns if the passed in error is an [ErrNotFound], -func IsNotFound(err error) bool { - _, ok := getImplementer(err).(ErrNotFound) - return ok -} - -// IsInvalidParameter returns if the passed in error is an [ErrInvalidParameter]. -func IsInvalidParameter(err error) bool { - _, ok := getImplementer(err).(ErrInvalidParameter) - return ok -} - -// IsConflict returns if the passed in error is an [ErrConflict]. -func IsConflict(err error) bool { - _, ok := getImplementer(err).(ErrConflict) - return ok -} - -// IsUnauthorized returns if the passed in error is an [ErrUnauthorized]. 
-func IsUnauthorized(err error) bool { - _, ok := getImplementer(err).(ErrUnauthorized) - return ok -} - -// IsUnavailable returns if the passed in error is an [ErrUnavailable]. -func IsUnavailable(err error) bool { - _, ok := getImplementer(err).(ErrUnavailable) - return ok -} - -// IsForbidden returns if the passed in error is an [ErrForbidden]. -func IsForbidden(err error) bool { - _, ok := getImplementer(err).(ErrForbidden) - return ok -} - -// IsSystem returns if the passed in error is an [ErrSystem]. -func IsSystem(err error) bool { - _, ok := getImplementer(err).(ErrSystem) - return ok -} - -// IsNotModified returns if the passed in error is an [ErrNotModified]. -func IsNotModified(err error) bool { - _, ok := getImplementer(err).(ErrNotModified) - return ok -} - -// IsNotImplemented returns if the passed in error is an [ErrNotImplemented]. -func IsNotImplemented(err error) bool { - _, ok := getImplementer(err).(ErrNotImplemented) - return ok -} - -// IsUnknown returns if the passed in error is an [ErrUnknown]. -func IsUnknown(err error) bool { - _, ok := getImplementer(err).(ErrUnknown) - return ok -} - -// IsCancelled returns if the passed in error is an [ErrCancelled]. -func IsCancelled(err error) bool { - _, ok := getImplementer(err).(ErrCancelled) - return ok -} - -// IsDeadline returns if the passed in error is an [ErrDeadline]. -func IsDeadline(err error) bool { - _, ok := getImplementer(err).(ErrDeadline) - return ok -} - -// IsDataLoss returns if the passed in error is an [ErrDataLoss]. -func IsDataLoss(err error) bool { - _, ok := getImplementer(err).(ErrDataLoss) - return ok -} - -// IsContext returns if the passed in error is due to context cancellation or deadline exceeded. -func IsContext(err error) bool { - return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) -} diff --git a/src/cmd/linuxkit/vendor/github.com/docker/docker/internal/multierror/multierror.go b/src/cmd/linuxkit/vendor/github.com/docker/docker/internal/multierror/multierror.go index cf4d6a595..e899f4de8 100644 --- a/src/cmd/linuxkit/vendor/github.com/docker/docker/internal/multierror/multierror.go +++ b/src/cmd/linuxkit/vendor/github.com/docker/docker/internal/multierror/multierror.go @@ -36,7 +36,7 @@ func (e *joinError) Error() string { } stringErrs := make([]string, 0, len(e.errs)) for _, subErr := range e.errs { - stringErrs = append(stringErrs, strings.Replace(subErr.Error(), "\n", "\n\t", -1)) + stringErrs = append(stringErrs, strings.ReplaceAll(subErr.Error(), "\n", "\n\t")) } return "* " + strings.Join(stringErrs, "\n* ") } diff --git a/src/cmd/linuxkit/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/src/cmd/linuxkit/vendor/github.com/google/go-cmp/cmp/internal/function/func.go index d127d4362..def01a6be 100644 --- a/src/cmd/linuxkit/vendor/github.com/google/go-cmp/cmp/internal/function/func.go +++ b/src/cmd/linuxkit/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -19,6 +19,7 @@ const ( tbFunc // func(T) bool ttbFunc // func(T, T) bool + ttiFunc // func(T, T) int trbFunc // func(T, R) bool tibFunc // func(T, I) bool trFunc // func(T) R @@ -28,11 +29,13 @@ const ( Transformer = trFunc // func(T) R ValueFilter = ttbFunc // func(T, T) bool Less = ttbFunc // func(T, T) bool + Compare = ttiFunc // func(T, T) int ValuePredicate = tbFunc // func(T) bool KeyValuePredicate = trbFunc // func(T, R) bool ) var boolType = reflect.TypeOf(true) +var intType = reflect.TypeOf(0) // IsType reports whether the reflect.Type is of the specified function 
type. func IsType(t reflect.Type, ft funcType) bool { @@ -49,6 +52,10 @@ func IsType(t reflect.Type, ft funcType) bool { if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { return true } + case ttiFunc: // func(T, T) int + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == intType { + return true + } case trbFunc: // func(T, R) bool if ni == 2 && no == 1 && t.Out(0) == boolType { return true diff --git a/src/cmd/linuxkit/vendor/github.com/google/go-cmp/cmp/options.go b/src/cmd/linuxkit/vendor/github.com/google/go-cmp/cmp/options.go index 754496f3b..ba3fce81f 100644 --- a/src/cmd/linuxkit/vendor/github.com/google/go-cmp/cmp/options.go +++ b/src/cmd/linuxkit/vendor/github.com/google/go-cmp/cmp/options.go @@ -232,7 +232,15 @@ func (validator) apply(s *state, vx, vy reflect.Value) { if t := s.curPath.Index(-2).Type(); t.Name() != "" { // Named type with unexported fields. name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType - if _, ok := reflect.New(t).Interface().(error); ok { + isProtoMessage := func(t reflect.Type) bool { + m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect") + return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 && + m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" && + m.Type.Out(0).Name() == "Message" + } + if isProtoMessage(t) { + help = `consider using "google.golang.org/protobuf/testing/protocmp".Transform to compare proto.Message types` + } else if _, ok := reflect.New(t).Interface().(error); ok { help = "consider using cmpopts.EquateErrors to compare error values" } else if t.Comparable() { help = "consider using cmpopts.EquateComparable to compare comparable Go types" diff --git a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go index d7b15fcfb..2e50082ad 100644 --- a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go +++ b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go @@ -94,7 +94,7 @@ func Int64(val string) (int64, error) { } // Int64Slice converts 'val' where individual integers are separated by -// 'sep' into a int64 slice. +// 'sep' into an int64 slice. func Int64Slice(val, sep string) ([]int64, error) { s := strings.Split(val, sep) values := make([]int64, len(s)) @@ -118,7 +118,7 @@ func Int32(val string) (int32, error) { } // Int32Slice converts 'val' where individual integers are separated by -// 'sep' into a int32 slice. +// 'sep' into an int32 slice. func Int32Slice(val, sep string) ([]int32, error) { s := strings.Split(val, sep) values := make([]int32, len(s)) @@ -190,7 +190,7 @@ func Bytes(val string) ([]byte, error) { } // BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe -// base64 without padding, are separated by 'sep' into a slice of bytes slices slice. +// base64 without padding, are separated by 'sep' into a slice of byte slices. 
func BytesSlice(val, sep string) ([][]byte, error) { s := strings.Split(val, sep) values := make([][]byte, len(s)) diff --git a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go index 01f573419..41cd4f503 100644 --- a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go +++ b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go @@ -81,6 +81,21 @@ func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.R mux.errorHandler(ctx, mux, marshaler, w, r, err) } +// HTTPStreamError uses the mux-configured stream error handler to notify error to the client without closing the connection. +func HTTPStreamError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) { + st := mux.streamErrorHandler(ctx, err) + msg := errorChunk(st) + buf, err := marshaler.Marshal(msg) + if err != nil { + grpclog.Errorf("Failed to marshal an error: %v", err) + return + } + if _, err := w.Write(buf); err != nil { + grpclog.Errorf("Failed to notify error to client: %v", err) + return + } +} + // DefaultHTTPErrorHandler is the default error handler. // If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode. // If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. This is diff --git a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go index 9005d6a0b..2fcd7af3c 100644 --- a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go +++ b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go @@ -155,7 +155,7 @@ func buildPathsBlindly(name string, in interface{}) []string { return paths } -// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask type fieldMaskPathItem struct { // the list of prior fields leading up to node connected by dots path string diff --git a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go index 9f50a569e..f0727cf7c 100644 --- a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go +++ b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go @@ -64,7 +64,13 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal } if !wroteHeader { - w.Header().Set("Content-Type", marshaler.ContentType(respRw)) + var contentType string + if sct, ok := marshaler.(StreamContentType); ok { + contentType = sct.StreamContentType(respRw) + } else { + contentType = marshaler.ContentType(respRw) + } + w.Header().Set("Content-Type", contentType) } var buf []byte @@ -190,11 +196,11 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha return } - if !doForwardTrailers { + if !doForwardTrailers && mux.writeContentLength { w.Header().Set("Content-Length", strconv.Itoa(len(buf))) } - if _, err = w.Write(buf); err != nil { + if _, err = w.Write(buf); err != nil && !errors.Is(err, http.ErrBodyNotAllowed) { grpclog.Errorf("Failed to write response: %v", err) } diff 
--git a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go index 2c0d25ff4..b1dfc37af 100644 --- a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go +++ b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go @@ -48,3 +48,11 @@ type Delimited interface { // Delimiter returns the record separator for the stream. Delimiter() []byte } + +// StreamContentType defines the streaming content type. +type StreamContentType interface { + // StreamContentType returns the content type for a stream. This shares the + // same behaviour as for `Marshaler.ContentType`, but is called, if present, + // in the case of a streamed response. + StreamContentType(v interface{}) string +} diff --git a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go index 0b051e6e8..07c28112c 100644 --- a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go +++ b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go @@ -86,8 +86,8 @@ func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { // It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. // // For example, you could allow the client to specify the use of the runtime.JSONPb marshaler -// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler -// with a "application/json" Content-Type. +// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler +// with an "application/json" Content-Type. // "*" can be used to match any Content-Type. // This can be attached to a ServerMux with the marshaler option. func makeMarshalerMIMERegistry() marshalerRegistry { diff --git a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go index 60c2065dd..19255ec44 100644 --- a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go +++ b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go @@ -71,6 +71,7 @@ type ServeMux struct { routingErrorHandler RoutingErrorHandlerFunc disablePathLengthFallback bool unescapingMode UnescapingMode + writeContentLength bool } // ServeMuxOption is an option that can be given to a ServeMux on construction. @@ -258,6 +259,13 @@ func WithDisablePathLengthFallback() ServeMuxOption { } } +// WithWriteContentLength returns a ServeMuxOption to enable writing content length on non-streaming responses +func WithWriteContentLength() ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.writeContentLength = true + } +} + // WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath. // When called the handler will forward the request to the upstream grpc service health check (defined in the // gRPC Health Checking Protocol). 
diff --git a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go index d549407f2..f710036b3 100644 --- a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go +++ b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go @@ -40,7 +40,7 @@ func Float32P(val string) (*float32, error) { } // Int64P parses the given string representation of an integer -// and returns a pointer to a int64 whose value is same as the parsed integer. +// and returns a pointer to an int64 whose value is same as the parsed integer. func Int64P(val string) (*int64, error) { i, err := Int64(val) if err != nil { @@ -50,7 +50,7 @@ func Int64P(val string) (*int64, error) { } // Int32P parses the given string representation of an integer -// and returns a pointer to a int32 whose value is same as the parsed integer. +// and returns a pointer to an int32 whose value is same as the parsed integer. func Int32P(val string) (*int32, error) { i, err := Int32(val) if err != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go index fe634174b..8549dfb97 100644 --- a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go +++ b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go @@ -126,6 +126,15 @@ func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []strin } } + // Check if oneof already set + if of := fieldDescriptor.ContainingOneof(); of != nil && !of.IsSynthetic() { + if f := msgValue.WhichOneof(of); f != nil { + if fieldDescriptor.Message() == nil || fieldDescriptor.FullName() != f.FullName() { + return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) + } + } + } + // If this is the last element, we're done if i == len(fieldPath)-1 { break @@ -140,13 +149,6 @@ func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []strin msgValue = msgValue.Mutable(fieldDescriptor).Message() } - // Check if oneof already set - if of := fieldDescriptor.ContainingOneof(); of != nil { - if f := msgValue.WhichOneof(of); f != nil { - return fmt.Errorf("field already set for oneof %q", of.FullName().Name()) - } - } - switch { case fieldDescriptor.IsList(): return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values) @@ -291,7 +293,11 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p if err != nil { return protoreflect.Value{}, err } - msg = timestamppb.New(t) + timestamp := timestamppb.New(t) + if ok := timestamp.IsValid(); !ok { + return protoreflect.Value{}, fmt.Errorf("%s before 0001-01-01", value) + } + msg = timestamp case "google.protobuf.Duration": d, err := time.ParseDuration(value) if err != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go index dfe7de486..38ca39cc5 100644 --- a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go +++ b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go @@ -1,6 +1,6 @@ package utilities -// An OpCode is a opcode of compiled path patterns. 
+// OpCode is an opcode of compiled path patterns. type OpCode int // These constants are the valid values of OpCode. diff --git a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go index d224ab776..66aa5f2dc 100644 --- a/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go +++ b/src/cmd/linuxkit/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go @@ -5,7 +5,7 @@ import ( "strings" ) -// flagInterface is an cut down interface to `flag` +// flagInterface is a cut down interface to `flag` type flagInterface interface { Var(value flag.Value, name string, usage string) } diff --git a/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go b/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go new file mode 100644 index 000000000..73aafe7e1 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/attestations.go @@ -0,0 +1,99 @@ +package in_toto + +import ( + "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" + slsa01 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1" + slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + slsa1 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1" +) + +const ( + // StatementInTotoV01 is the statement type for the generalized link format + // containing statements. This is constant for all predicate types. + StatementInTotoV01 = "https://in-toto.io/Statement/v0.1" + // PredicateSPDX represents a SBOM using the SPDX standard. + // The SPDX mandates 'spdxVersion' field, so predicate type can omit + // version. + PredicateSPDX = "https://spdx.dev/Document" + // PredicateCycloneDX represents a CycloneDX SBOM + PredicateCycloneDX = "https://cyclonedx.org/bom" + // PredicateLinkV1 represents an in-toto 0.9 link. + PredicateLinkV1 = "https://in-toto.io/Link/v1" +) + +// Subject describes the set of software artifacts the statement applies to. +type Subject struct { + Name string `json:"name"` + Digest common.DigestSet `json:"digest"` +} + +// StatementHeader defines the common fields for all statements +type StatementHeader struct { + Type string `json:"_type"` + PredicateType string `json:"predicateType"` + Subject []Subject `json:"subject"` +} + +/* +Statement binds the attestation to a particular subject and identifies the +of the predicate. This struct represents a generic statement. +*/ +type Statement struct { + StatementHeader + // Predicate contains type speficic metadata. + Predicate interface{} `json:"predicate"` +} + +// ProvenanceStatementSLSA01 is the definition for an entire provenance statement with SLSA 0.1 predicate. +type ProvenanceStatementSLSA01 struct { + StatementHeader + Predicate slsa01.ProvenancePredicate `json:"predicate"` +} + +// ProvenanceStatementSLSA02 is the definition for an entire provenance statement with SLSA 0.2 predicate. +type ProvenanceStatementSLSA02 struct { + StatementHeader + Predicate slsa02.ProvenancePredicate `json:"predicate"` +} + +// ProvenanceStatementSLSA1 is the definition for an entire provenance statement with SLSA 1.0 predicate. +type ProvenanceStatementSLSA1 struct { + StatementHeader + Predicate slsa1.ProvenancePredicate `json:"predicate"` +} + +// ProvenanceStatement is the definition for an entire provenance statement with SLSA 0.2 predicate. 
+// Deprecated: Only version-specific provenance structs will be maintained (ProvenanceStatementSLSA01, ProvenanceStatementSLSA02). +type ProvenanceStatement struct { + StatementHeader + Predicate slsa02.ProvenancePredicate `json:"predicate"` +} + +// LinkStatement is the definition for an entire link statement. +type LinkStatement struct { + StatementHeader + Predicate Link `json:"predicate"` +} + +/* +SPDXStatement is the definition for an entire SPDX statement. +This is currently not implemented. Some tooling exists here: +https://github.com/spdx/tools-golang, but this software is still in +early state. +This struct is the same as the generic Statement struct but is added for +completeness +*/ +type SPDXStatement struct { + StatementHeader + Predicate interface{} `json:"predicate"` +} + +/* +CycloneDXStatement defines a cyclonedx sbom in the predicate. It is not +currently serialized just as its SPDX counterpart. It is an empty +interface, like the generic Statement. +*/ +type CycloneDXStatement struct { + StatementHeader + Predicate interface{} `json:"predicate"` +} diff --git a/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/envelope.go b/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/envelope.go new file mode 100644 index 000000000..2c8afff1f --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/envelope.go @@ -0,0 +1,166 @@ +package in_toto + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" + "github.com/secure-systems-lab/go-securesystemslib/dsse" + "github.com/secure-systems-lab/go-securesystemslib/signerverifier" +) + +// PayloadType is the payload type used for links and layouts. 
+const PayloadType = "application/vnd.in-toto+json" + +// ErrInvalidPayloadType indicates that the envelope used an unknown payload type +var ErrInvalidPayloadType = errors.New("unknown payload type") + +type Envelope struct { + envelope *dsse.Envelope + payload any +} + +func loadEnvelope(env *dsse.Envelope) (*Envelope, error) { + e := &Envelope{envelope: env} + + contentBytes, err := env.DecodeB64Payload() + if err != nil { + return nil, err + } + + payload, err := loadPayload(contentBytes) + if err != nil { + return nil, err + } + e.payload = payload + + return e, nil +} + +func (e *Envelope) SetPayload(payload any) error { + encodedBytes, err := cjson.EncodeCanonical(payload) + if err != nil { + return err + } + + e.payload = payload + e.envelope = &dsse.Envelope{ + Payload: base64.StdEncoding.EncodeToString(encodedBytes), + PayloadType: PayloadType, + } + + return nil +} + +func (e *Envelope) GetPayload() any { + return e.payload +} + +func (e *Envelope) VerifySignature(key Key) error { + verifier, err := getSignerVerifierFromKey(key) + if err != nil { + return err + } + + ev, err := dsse.NewEnvelopeVerifier(verifier) + if err != nil { + return err + } + + _, err = ev.Verify(context.Background(), e.envelope) + return err +} + +func (e *Envelope) Sign(key Key) error { + signer, err := getSignerVerifierFromKey(key) + if err != nil { + return err + } + + es, err := dsse.NewEnvelopeSigner(signer) + if err != nil { + return err + } + + payload, err := e.envelope.DecodeB64Payload() + if err != nil { + return err + } + + env, err := es.SignPayload(context.Background(), e.envelope.PayloadType, payload) + if err != nil { + return err + } + + e.envelope = env + return nil +} + +func (e *Envelope) Sigs() []Signature { + sigs := []Signature{} + for _, s := range e.envelope.Signatures { + sigs = append(sigs, Signature{ + KeyID: s.KeyID, + Sig: s.Sig, + }) + } + return sigs +} + +func (e *Envelope) GetSignatureForKeyID(keyID string) (Signature, error) { + for _, s := range e.Sigs() { + if s.KeyID == keyID { + return s, nil + } + } + + return Signature{}, fmt.Errorf("no signature found for key '%s'", keyID) +} + +func (e *Envelope) Dump(path string) error { + jsonBytes, err := json.MarshalIndent(e.envelope, "", " ") + if err != nil { + return err + } + + // Write JSON bytes to the passed path with permissions (-rw-r--r--) + err = os.WriteFile(path, jsonBytes, 0644) + if err != nil { + return err + } + + return nil +} + +func getSignerVerifierFromKey(key Key) (dsse.SignerVerifier, error) { + sslibKey := getSSLibKeyFromKey(key) + + switch sslibKey.KeyType { + case signerverifier.RSAKeyType: + return signerverifier.NewRSAPSSSignerVerifierFromSSLibKey(&sslibKey) + case signerverifier.ED25519KeyType: + return signerverifier.NewED25519SignerVerifierFromSSLibKey(&sslibKey) + case signerverifier.ECDSAKeyType: + return signerverifier.NewECDSASignerVerifierFromSSLibKey(&sslibKey) + } + + return nil, ErrUnsupportedKeyType +} + +func getSSLibKeyFromKey(key Key) signerverifier.SSLibKey { + return signerverifier.SSLibKey{ + KeyType: key.KeyType, + KeyIDHashAlgorithms: key.KeyIDHashAlgorithms, + KeyID: key.KeyID, + Scheme: key.Scheme, + KeyVal: signerverifier.KeyVal{ + Public: key.KeyVal.Public, + Private: key.KeyVal.Private, + Certificate: key.KeyVal.Certificate, + }, + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go b/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go index 7de482821..52429ca44 100644 --- 
a/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go +++ b/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/keylib.go @@ -13,7 +13,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "strings" @@ -325,7 +324,7 @@ func (k *Key) LoadKeyReader(r io.Reader, scheme string, KeyIDHashAlgorithms []st return ErrNoPEMBlock } // Read key bytes - pemBytes, err := ioutil.ReadAll(r) + pemBytes, err := io.ReadAll(r) if err != nil { return err } @@ -344,7 +343,7 @@ func (k *Key) LoadKeyReaderDefaults(r io.Reader) error { return ErrNoPEMBlock } // Read key bytes - pemBytes, err := ioutil.ReadAll(r) + pemBytes, err := io.ReadAll(r) if err != nil { return err } @@ -366,7 +365,7 @@ func (k *Key) LoadKeyReaderDefaults(r io.Reader) error { func getDefaultKeyScheme(key interface{}) (scheme string, keyIDHashAlgorithms []string, err error) { keyIDHashAlgorithms = []string{"sha256", "sha512"} - switch key.(type) { + switch k := key.(type) { case *rsa.PublicKey, *rsa.PrivateKey: scheme = rsassapsssha256Scheme case ed25519.PrivateKey, ed25519.PublicKey: @@ -374,7 +373,7 @@ func getDefaultKeyScheme(key interface{}) (scheme string, keyIDHashAlgorithms [] case *ecdsa.PrivateKey, *ecdsa.PublicKey: scheme = ecdsaSha2nistp256 case *x509.Certificate: - return getDefaultKeyScheme(key.(*x509.Certificate).PublicKey) + return getDefaultKeyScheme(k.PublicKey) default: err = ErrUnsupportedKeyType } @@ -382,11 +381,10 @@ func getDefaultKeyScheme(key interface{}) (scheme string, keyIDHashAlgorithms [] return scheme, keyIDHashAlgorithms, err } -func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDHashAlgorithms []string) error { - - switch key.(type) { +func (k *Key) loadKey(keyObj interface{}, pemData *pem.Block, scheme string, keyIDHashAlgorithms []string) error { + switch key := keyObj.(type) { case *rsa.PublicKey: - pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*rsa.PublicKey)) + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key) if err != nil { return err } @@ -396,7 +394,7 @@ func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDH case *rsa.PrivateKey: // Note: RSA Public Keys will get stored as X.509 SubjectPublicKeyInfo (RFC5280) // This behavior is consistent to the securesystemslib - pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*rsa.PrivateKey).Public()) + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.Public()) if err != nil { return err } @@ -404,16 +402,16 @@ func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDH return err } case ed25519.PublicKey: - if err := k.setKeyComponents(key.(ed25519.PublicKey), []byte{}, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil { + if err := k.setKeyComponents(key, []byte{}, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil { return err } case ed25519.PrivateKey: - pubKeyBytes := key.(ed25519.PrivateKey).Public() - if err := k.setKeyComponents(pubKeyBytes.(ed25519.PublicKey), key.(ed25519.PrivateKey), ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil { + pubKeyBytes := key.Public() + if err := k.setKeyComponents(pubKeyBytes.(ed25519.PublicKey), key, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil { return err } case *ecdsa.PrivateKey: - pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*ecdsa.PrivateKey).Public()) + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.Public()) if err != nil { return err } @@ -421,7 +419,7 @@ func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDH return err } case 
*ecdsa.PublicKey: - pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*ecdsa.PublicKey)) + pubKeyBytes, err := x509.MarshalPKIXPublicKey(key) if err != nil { return err } @@ -429,7 +427,7 @@ func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDH return err } case *x509.Certificate: - err := k.loadKey(key.(*x509.Certificate).PublicKey, pemData, scheme, keyIDHashAlgorithms) + err := k.loadKey(key.PublicKey, pemData, scheme, keyIDHashAlgorithms) if err != nil { return err } diff --git a/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go b/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go index e22b79da3..f56b784ea 100644 --- a/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go +++ b/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/model.go @@ -7,7 +7,6 @@ import ( "encoding/json" "errors" "fmt" - "io/ioutil" "os" "reflect" "regexp" @@ -15,10 +14,6 @@ import ( "strings" "time" - "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" - slsa01 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1" - slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" - "github.com/secure-systems-lab/go-securesystemslib/cjson" "github.com/secure-systems-lab/go-securesystemslib/dsse" ) @@ -30,7 +25,7 @@ and private keys in PEM format stored as strings. For public keys the Private field may be an empty string. */ type KeyVal struct { - Private string `json:"private"` + Private string `json:"private,omitempty"` Public string `json:"public"` Certificate string `json:"certificate,omitempty"` } @@ -48,9 +43,6 @@ type Key struct { Scheme string `json:"scheme"` } -// PayloadType is the payload type used for links and layouts. -const PayloadType = "application/vnd.in-toto+json" - // ErrEmptyKeyField will be thrown if a field in our Key struct is empty. var ErrEmptyKeyField = errors.New("empty field in key") @@ -73,23 +65,6 @@ var ErrNoPublicKey = errors.New("the given key is not a public key") // for example: curve size = "521" and scheme = "ecdsa-sha2-nistp224" var ErrCurveSizeSchemeMismatch = errors.New("the scheme does not match the curve size") -const ( - // StatementInTotoV01 is the statement type for the generalized link format - // containing statements. This is constant for all predicate types. - StatementInTotoV01 = "https://in-toto.io/Statement/v0.1" - // PredicateSPDX represents a SBOM using the SPDX standard. - // The SPDX mandates 'spdxVersion' field, so predicate type can omit - // version. - PredicateSPDX = "https://spdx.dev/Document" - // PredicateCycloneDX represents a CycloneDX SBOM - PredicateCycloneDX = "https://cyclonedx.org/bom" - // PredicateLinkV1 represents an in-toto 0.9 link. - PredicateLinkV1 = "https://in-toto.io/Link/v1" -) - -// ErrInvalidPayloadType indicates that the envelope used an unkown payload type -var ErrInvalidPayloadType = errors.New("unknown payload type") - /* matchEcdsaScheme checks if the scheme suffix, matches the ecdsa key curve size. 
We do not need a full regex match here, because @@ -702,6 +677,67 @@ func validateLayout(layout Layout) error { return nil } +type Metadata interface { + Sign(Key) error + VerifySignature(Key) error + GetPayload() any + Sigs() []Signature + GetSignatureForKeyID(string) (Signature, error) + Dump(string) error +} + +func LoadMetadata(path string) (Metadata, error) { + jsonBytes, err := os.ReadFile(path) + if err != nil { + return nil, err + } + + var rawData map[string]*json.RawMessage + if err := json.Unmarshal(jsonBytes, &rawData); err != nil { + return nil, err + } + + if _, ok := rawData["payloadType"]; ok { + dsseEnv := &dsse.Envelope{} + if rawData["payload"] == nil || rawData["signatures"] == nil { + return nil, fmt.Errorf("in-toto metadata envelope requires 'payload' and 'signatures' parts") + } + + if err := json.Unmarshal(jsonBytes, dsseEnv); err != nil { + return nil, err + } + + if dsseEnv.PayloadType != PayloadType { + return nil, ErrInvalidPayloadType + } + + return loadEnvelope(dsseEnv) + } + + mb := &Metablock{} + + // Error out on missing `signed` or `signatures` field or if + // one of them has a `null` value, which would lead to a nil pointer + // dereference in Unmarshal below. + if rawData["signed"] == nil || rawData["signatures"] == nil { + return nil, fmt.Errorf("in-toto metadata requires 'signed' and 'signatures' parts") + } + + // Fully unmarshal signatures part + if err := json.Unmarshal(*rawData["signatures"], &mb.Signatures); err != nil { + return nil, err + } + + payload, err := loadPayload(*rawData["signed"]) + if err != nil { + return nil, err + } + + mb.Signed = payload + + return mb, nil +} + /* Metablock is a generic container for signable in-toto objects such as Layout or Link. It has two fields, one that contains the signable object and one that @@ -767,17 +803,13 @@ func checkRequiredJSONFields(obj map[string]interface{}, Load parses JSON formatted metadata at the passed path into the Metablock object on which it was called. It returns an error if it cannot parse a valid JSON formatted Metablock that contains a Link or Layout. + +Deprecated: Use LoadMetadata for a signature wrapper agnostic way to load an +envelope. 
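// Editorial example, not part of the vendored diff: a sketch of how a caller might
// use the new LoadMetadata helper and the Metadata interface, which hide whether a
// file on disk is a DSSE envelope or a legacy Metablock. Assumes
// `import in_toto "github.com/in-toto/in-toto-golang/in_toto"` and "fmt"; the path
// and verifier key are supplied by the caller.
func inspectLink(path string, verifierKey in_toto.Key) error {
	md, err := in_toto.LoadMetadata(path) // detects a DSSE envelope via its "payloadType" field
	if err != nil {
		return err
	}
	if err := md.VerifySignature(verifierKey); err != nil {
		return err
	}
	if link, ok := md.GetPayload().(in_toto.Link); ok {
		fmt.Printf("verified link %q with %d products\n", link.Name, len(link.Products))
	}
	return nil
}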
*/ func (mb *Metablock) Load(path string) error { - // Open file and close before returning - jsonFile, err := os.Open(path) - if err != nil { - return err - } - defer jsonFile.Close() - // Read entire file - jsonBytes, err := ioutil.ReadAll(jsonFile) + jsonBytes, err := os.ReadFile(path) if err != nil { return err } @@ -803,54 +835,14 @@ func (mb *Metablock) Load(path string) error { return err } - // Temporarily copy signed to opaque map to inspect the `_type` of signed - // and create link or layout accordingly - var signed map[string]interface{} - if err := json.Unmarshal(*rawMb["signed"], &signed); err != nil { + payload, err := loadPayload(*rawMb["signed"]) + if err != nil { return err } - if signed["_type"] == "link" { - var link Link - if err := checkRequiredJSONFields(signed, reflect.TypeOf(link)); err != nil { - return err - } + mb.Signed = payload - data, err := rawMb["signed"].MarshalJSON() - if err != nil { - return err - } - decoder := json.NewDecoder(strings.NewReader(string(data))) - decoder.DisallowUnknownFields() - if err := decoder.Decode(&link); err != nil { - return err - } - mb.Signed = link - - } else if signed["_type"] == "layout" { - var layout Layout - if err := checkRequiredJSONFields(signed, reflect.TypeOf(layout)); err != nil { - return err - } - - data, err := rawMb["signed"].MarshalJSON() - if err != nil { - return err - } - decoder := json.NewDecoder(strings.NewReader(string(data))) - decoder.DisallowUnknownFields() - if err := decoder.Decode(&layout); err != nil { - return err - } - - mb.Signed = layout - - } else { - return fmt.Errorf("the '_type' field of the 'signed' part of in-toto" + - " metadata must be one of 'link' or 'layout'") - } - - return jsonFile.Close() + return nil } /* @@ -866,7 +858,7 @@ func (mb *Metablock) Dump(path string) error { } // Write JSON bytes to the passed path with permissions (-rw-r--r--) - err = ioutil.WriteFile(path, jsonBytes, 0644) + err = os.WriteFile(path, jsonBytes, 0644) if err != nil { return err } @@ -883,6 +875,14 @@ func (mb *Metablock) GetSignableRepresentation() ([]byte, error) { return cjson.EncodeCanonical(mb.Signed) } +func (mb *Metablock) GetPayload() any { + return mb.Signed +} + +func (mb *Metablock) Sigs() []Signature { + return mb.Signatures +} + /* VerifySignature verifies the first signature, corresponding to the passed Key, that it finds in the Signatures field of the Metablock on which it was called. @@ -965,109 +965,3 @@ func (mb *Metablock) Sign(key Key) error { mb.Signatures = append(mb.Signatures, newSignature) return nil } - -// Subject describes the set of software artifacts the statement applies to. -type Subject struct { - Name string `json:"name"` - Digest common.DigestSet `json:"digest"` -} - -// StatementHeader defines the common fields for all statements -type StatementHeader struct { - Type string `json:"_type"` - PredicateType string `json:"predicateType"` - Subject []Subject `json:"subject"` -} - -/* -Statement binds the attestation to a particular subject and identifies the -of the predicate. This struct represents a generic statement. -*/ -type Statement struct { - StatementHeader - // Predicate contains type speficic metadata. - Predicate interface{} `json:"predicate"` -} - -// ProvenanceStatementSLSA01 is the definition for an entire provenance statement with SLSA 0.1 predicate. 
-type ProvenanceStatementSLSA01 struct { - StatementHeader - Predicate slsa01.ProvenancePredicate `json:"predicate"` -} - -// ProvenanceStatementSLSA02 is the definition for an entire provenance statement with SLSA 0.2 predicate. -type ProvenanceStatementSLSA02 struct { - StatementHeader - Predicate slsa02.ProvenancePredicate `json:"predicate"` -} - -// ProvenanceStatement is the definition for an entire provenance statement with SLSA 0.2 predicate. -// Deprecated: Only version-specific provenance structs will be maintained (ProvenanceStatementSLSA01, ProvenanceStatementSLSA02). -type ProvenanceStatement struct { - StatementHeader - Predicate slsa02.ProvenancePredicate `json:"predicate"` -} - -// LinkStatement is the definition for an entire link statement. -type LinkStatement struct { - StatementHeader - Predicate Link `json:"predicate"` -} - -/* -SPDXStatement is the definition for an entire SPDX statement. -This is currently not implemented. Some tooling exists here: -https://github.com/spdx/tools-golang, but this software is still in -early state. -This struct is the same as the generic Statement struct but is added for -completeness -*/ -type SPDXStatement struct { - StatementHeader - Predicate interface{} `json:"predicate"` -} - -/* -CycloneDXStatement defines a cyclonedx sbom in the predicate. It is not -currently serialized just as its SPDX counterpart. It is an empty -interface, like the generic Statement. -*/ -type CycloneDXStatement struct { - StatementHeader - Predicate interface{} `json:"predicate"` -} - -/* -DSSESigner provides signature generation and validation based on the SSL -Signing Spec: https://github.com/secure-systems-lab/signing-spec -as describe by: https://github.com/MarkLodato/ITE/tree/media-type/ITE/5 -It wraps the generic SSL envelope signer and enforces the correct payload -type both during signature generation and validation. -*/ -type DSSESigner struct { - signer *dsse.EnvelopeSigner -} - -func NewDSSESigner(p ...dsse.SignVerifier) (*DSSESigner, error) { - es, err := dsse.NewEnvelopeSigner(p...) - if err != nil { - return nil, err - } - - return &DSSESigner{ - signer: es, - }, nil -} - -func (s *DSSESigner) SignPayload(body []byte) (*dsse.Envelope, error) { - return s.signer.SignPayload(PayloadType, body) -} - -func (s *DSSESigner) Verify(e *dsse.Envelope) error { - if e.PayloadType != PayloadType { - return ErrInvalidPayloadType - } - - _, err := s.signer.Verify(e) - return err -} diff --git a/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go b/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go index 87e690507..f0a55d821 100644 --- a/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go +++ b/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/runlib.go @@ -4,7 +4,7 @@ import ( "bytes" "errors" "fmt" - "io/ioutil" + "io" "os" "os/exec" "path/filepath" @@ -44,7 +44,7 @@ normalized to Unix-style line separators (LF) before hashing file contents. func RecordArtifact(path string, hashAlgorithms []string, lineNormalization bool) (map[string]interface{}, error) { supportedHashMappings := getHashMapping() // Read file from passed path - contents, err := ioutil.ReadFile(path) + contents, err := os.ReadFile(path) hashedContentsMap := make(map[string]interface{}) if err != nil { return nil, err @@ -92,10 +92,10 @@ the following format: If recording an artifact fails the first return value is nil and the second return value is the error. 
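// Editorial example, not part of the vendored diff: RecordArtifacts gains a trailing
// followSymlinkDirs parameter in this version. A minimal sketch of a call, assuming
// the in_toto import; the paths and algorithms are placeholders.
func hashMaterials() (map[string]interface{}, error) {
	return in_toto.RecordArtifacts(
		[]string{"src/", "Makefile"}, // paths to record
		[]string{"sha256"},           // hash algorithms
		nil,                          // gitignore-style exclude patterns
		nil,                          // prefixes to left-strip from recorded paths
		false,                        // line-ending normalization
		true,                         // follow symlinked directories (new flag)
	)
}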
*/ -func RecordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (evalArtifacts map[string]interface{}, err error) { +func RecordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool) (evalArtifacts map[string]interface{}, err error) { // Make sure to initialize a fresh hashset for every RecordArtifacts call visitedSymlinks = NewSet() - evalArtifacts, err = recordArtifacts(paths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) + evalArtifacts, err = recordArtifacts(paths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) // pass result and error through return evalArtifacts, err } @@ -118,7 +118,7 @@ the following format: If recording an artifact fails the first return value is nil and the second return value is the error. */ -func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (map[string]interface{}, error) { +func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool) (map[string]interface{}, error) { artifacts := make(map[string]interface{}) for _, path := range paths { err := filepath.Walk(path, @@ -160,18 +160,35 @@ func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns if err != nil { return err } + info, err := os.Stat(evalSym) + if err != nil { + return err + } + targetIsDir := false + if info.IsDir() { + if !followSymlinkDirs { + // We don't follow symlinked directories + return nil + } + targetIsDir = true + } // add symlink to visitedSymlinks set // this way, we know which link we have visited already // if we visit a symlink twice, we have detected a symlink cycle visitedSymlinks.Add(path) - // We recursively call RecordArtifacts() to follow + // We recursively call recordArtifacts() to follow // the new path. - evalArtifacts, evalErr := recordArtifacts([]string{evalSym}, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) + evalArtifacts, evalErr := recordArtifacts([]string{evalSym}, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) if evalErr != nil { return evalErr } for key, value := range evalArtifacts { - artifacts[key] = value + if targetIsDir { + symlinkPath := filepath.Join(path, strings.TrimPrefix(key, evalSym)) + artifacts[symlinkPath] = value + } else { + artifacts[path] = value + } } return nil } @@ -189,8 +206,7 @@ func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns } } // Check if path is unique - _, existingPath := artifacts[path] - if existingPath { + if _, exists := artifacts[path]; exists { return fmt.Errorf("left stripping has resulted in non unique dictionary key: %s", path) } artifacts[path] = artifact @@ -273,8 +289,8 @@ func RunCommand(cmdArgs []string, runDir string) (map[string]interface{}, error) } // TODO: duplicate stdout, stderr - stdout, _ := ioutil.ReadAll(stdoutPipe) - stderr, _ := ioutil.ReadAll(stderrPipe) + stdout, _ := io.ReadAll(stdoutPipe) + stderr, _ := io.ReadAll(stderrPipe) retVal := waitErrToExitCode(cmd.Wait()) @@ -293,14 +309,10 @@ and materials at the passed materialPaths. The returned link is wrapped in a Metablock object. 
If command execution or artifact recording fails the first return value is an empty Metablock and the second return value is the error. */ -func InTotoRun(name string, runDir string, materialPaths []string, productPaths []string, - cmdArgs []string, key Key, hashAlgorithms []string, gitignorePatterns []string, - lStripPaths []string, lineNormalization bool) (Metablock, error) { - var linkMb Metablock - - materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) +func InTotoRun(name string, runDir string, materialPaths []string, productPaths []string, cmdArgs []string, key Key, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool, useDSSE bool) (Metadata, error) { + materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) if err != nil { - return linkMb, err + return nil, err } // make sure that we only run RunCommand if cmdArgs is not nil or empty @@ -308,16 +320,16 @@ func InTotoRun(name string, runDir string, materialPaths []string, productPaths if len(cmdArgs) != 0 { byProducts, err = RunCommand(cmdArgs, runDir) if err != nil { - return linkMb, err + return nil, err } } - products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) + products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) if err != nil { - return linkMb, err + return nil, err } - linkMb.Signed = Link{ + link := Link{ Type: "link", Name: name, Materials: materials, @@ -327,14 +339,25 @@ func InTotoRun(name string, runDir string, materialPaths []string, productPaths Environment: map[string]interface{}{}, } - linkMb.Signatures = []Signature{} - // We use a new feature from Go1.13 here, to check the key struct. - // IsZero() will return True, if the key hasn't been initialized + if useDSSE { + env := &Envelope{} + if err := env.SetPayload(link); err != nil { + return nil, err + } - // with other values than the default ones. + if !reflect.ValueOf(key).IsZero() { + if err := env.Sign(key); err != nil { + return nil, err + } + } + + return env, nil + } + + linkMb := &Metablock{Signed: link, Signatures: []Signature{}} if !reflect.ValueOf(key).IsZero() { if err := linkMb.Sign(key); err != nil { - return linkMb, err + return nil, err } } @@ -347,14 +370,13 @@ in order to provide evidence for supply chain steps that cannot be carries out by a single command. InTotoRecordStart collects the hashes of the materials before any commands are run, signs the unfinished link, and returns the link. 
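// Editorial example, not part of the vendored diff: a sketch of the start/stop
// recording flow with the new followSymlinkDirs and useDSSE parameters, assuming
// the in_toto import; the key, paths and output file name are placeholders.
func recordStep(key in_toto.Key) error {
	prelim, err := in_toto.InTotoRecordStart("build", []string{"src/"}, key,
		[]string{"sha256"}, nil, nil, false, false, true) // useDSSE=true returns a DSSE Envelope
	if err != nil {
		return err
	}
	// ... the actual build runs out of band here ...
	finished, err := in_toto.InTotoRecordStop(prelim, []string{"bin/"}, key,
		[]string{"sha256"}, nil, nil, false, false, true)
	if err != nil {
		return err
	}
	return finished.Dump("build.link")
}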
*/ -func InTotoRecordStart(name string, materialPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (Metablock, error) { - var linkMb Metablock - materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) +func InTotoRecordStart(name string, materialPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool, useDSSE bool) (Metadata, error) { + materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) if err != nil { - return linkMb, err + return nil, err } - linkMb.Signed = Link{ + link := Link{ Type: "link", Name: name, Materials: materials, @@ -364,9 +386,26 @@ func InTotoRecordStart(name string, materialPaths []string, key Key, hashAlgorit Environment: map[string]interface{}{}, } + if useDSSE { + env := &Envelope{} + if err := env.SetPayload(link); err != nil { + return nil, err + } + + if !reflect.ValueOf(key).IsZero() { + if err := env.Sign(key); err != nil { + return nil, err + } + } + + return env, nil + } + + linkMb := &Metablock{Signed: link, Signatures: []Signature{}} + linkMb.Signatures = []Signature{} if !reflect.ValueOf(key).IsZero() { if err := linkMb.Sign(key); err != nil { - return linkMb, err + return nil, err } } @@ -380,25 +419,39 @@ created by InTotoRecordStart and records the hashes of any products creted by commands run between InTotoRecordStart and InTotoRecordStop. The resultant finished link metablock is then signed by the provided key and returned. */ -func InTotoRecordStop(prelimLinkMb Metablock, productPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (Metablock, error) { - var linkMb Metablock - if err := prelimLinkMb.VerifySignature(key); err != nil { - return linkMb, err +func InTotoRecordStop(prelimLinkEnv Metadata, productPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool, followSymlinkDirs bool, useDSSE bool) (Metadata, error) { + if err := prelimLinkEnv.VerifySignature(key); err != nil { + return nil, err } - link, ok := prelimLinkMb.Signed.(Link) + link, ok := prelimLinkEnv.GetPayload().(Link) if !ok { - return linkMb, errors.New("invalid metadata block") + return nil, errors.New("invalid metadata block") } - products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization) + products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization, followSymlinkDirs) if err != nil { - return linkMb, err + return nil, err } link.Products = products - linkMb.Signed = link + if useDSSE { + env := &Envelope{} + if err := env.SetPayload(link); err != nil { + return nil, err + } + + if !reflect.ValueOf(key).IsZero() { + if err := env.Sign(key); err != nil { + return nil, err + } + } + + return env, nil + } + + linkMb := &Metablock{Signed: link, Signatures: []Signature{}} if !reflect.ValueOf(key).IsZero() { if err := linkMb.Sign(key); err != nil { return linkMb, err diff --git a/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go b/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go index 5fca7abb7..40416e29a 100644 --- 
a/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go +++ b/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2/provenance.go @@ -11,6 +11,13 @@ const ( PredicateSLSAProvenance = "https://slsa.dev/provenance/v0.2" ) +// These are type aliases to common to avoid backwards incompatible changes. +type ( + DigestSet = common.DigestSet + ProvenanceBuilder = common.ProvenanceBuilder + ProvenanceMaterial = common.ProvenanceMaterial +) + // ProvenancePredicate is the provenance predicate definition. type ProvenancePredicate struct { // Builder identifies the entity that executed the invocation, which is trusted to have diff --git a/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go b/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go new file mode 100644 index 000000000..e849731dc --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1/provenance.go @@ -0,0 +1,151 @@ +package v1 + +import ( + "time" + + "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" +) + +const ( + // PredicateSLSAProvenance represents a build provenance for an artifact. + PredicateSLSAProvenance = "https://slsa.dev/provenance/v1" ) + +// ProvenancePredicate is the provenance predicate definition. +type ProvenancePredicate struct { + // The BuildDefinition describes all of the inputs to the build. The + // accuracy and completeness are implied by runDetails.builder.id. + // + // It SHOULD contain all the information necessary and sufficient to + // initialize the build and begin execution. + BuildDefinition ProvenanceBuildDefinition `json:"buildDefinition"` + + // Details specific to this particular execution of the build. + RunDetails ProvenanceRunDetails `json:"runDetails"` +} + +// ProvenanceBuildDefinition describes the inputs to the build. +type ProvenanceBuildDefinition struct { + // Identifies the template for how to perform the build and interpret the + // parameters and dependencies. + + // The URI SHOULD resolve to a human-readable specification that includes: + // overall description of the build type; schema for externalParameters and + // systemParameters; unambiguous instructions for how to initiate the build + // given this BuildDefinition, and a complete example. + BuildType string `json:"buildType"` + + // The parameters that are under external control, such as those set by a + // user or tenant of the build system. They MUST be complete at SLSA Build + // L3, meaning that there is no additional mechanism for an external + // party to influence the build. (At lower SLSA Build levels, the + // completeness MAY be best effort.) + + // The build system SHOULD be designed to minimize the size and complexity + // of externalParameters, in order to reduce fragility and ease + // verification. Consumers SHOULD have an expectation of what “good” looks + // like; the more information that they need to check, the harder that task + // becomes. + ExternalParameters interface{} `json:"externalParameters"` + + // The parameters that are under the control of the entity represented by + // builder.id. The primary intention of this field is for debugging, + // incident response, and vulnerability management. The values here MAY be + // necessary for reproducing the build.
There is no need to verify these + // parameters because the build system is already trusted, and in many cases + // it is not practical to do so. + InternalParameters interface{} `json:"internalParameters,omitempty"` + + // Unordered collection of artifacts needed at build time. Completeness is + // best effort, at least through SLSA Build L3. For example, if the build + // script fetches and executes “example.com/foo.sh”, which in turn fetches + // “example.com/bar.tar.gz”, then both “foo.sh” and “bar.tar.gz” SHOULD be + // listed here. + ResolvedDependencies []ResourceDescriptor `json:"resolvedDependencies,omitempty"` +} + +// ProvenanceRunDetails includes details specific to a particular execution of a +// build. +type ProvenanceRunDetails struct { + // Identifies the entity that executed the invocation, which is trusted to + // have correctly performed the operation and populated this provenance. + // + // This field is REQUIRED for SLSA Build 1 unless id is implicit from the + // attestation envelope. + Builder Builder `json:"builder"` + + // Metadata about this particular execution of the build. + BuildMetadata BuildMetadata `json:"metadata,omitempty"` + + // Additional artifacts generated during the build that are not considered + // the “output” of the build but that might be needed during debugging or + // incident response. For example, this might reference logs generated + // during the build and/or a digest of the fully evaluated build + // configuration. + // + // In most cases, this SHOULD NOT contain all intermediate files generated + // during the build. Instead, this SHOULD only contain files that are + // likely to be useful later and that cannot be easily reproduced. + Byproducts []ResourceDescriptor `json:"byproducts,omitempty"` +} + +// ResourceDescriptor describes a particular software artifact or resource +// (mutable or immutable). +// See https://github.com/in-toto/attestation/blob/main/spec/v1.0/resource_descriptor.md +type ResourceDescriptor struct { + // A URI used to identify the resource or artifact globally. This field is + // REQUIRED unless either digest or content is set. + URI string `json:"uri,omitempty"` + + // A set of cryptographic digests of the contents of the resource or + // artifact. This field is REQUIRED unless either uri or content is set. + Digest common.DigestSet `json:"digest,omitempty"` + + // Machine-readable identifier for distinguishing between descriptors. + Name string `json:"name,omitempty"` + + // The location of the described resource or artifact, if different from the + // uri. + DownloadLocation string `json:"downloadLocation,omitempty"` + + // The MIME Type (i.e., media type) of the described resource or artifact. + MediaType string `json:"mediaType,omitempty"` + + // The contents of the resource or artifact. This field is REQUIRED unless + // either uri or digest is set. + Content []byte `json:"content,omitempty"` + + // This field MAY be used to provide additional information or metadata + // about the resource or artifact that may be useful to the consumer when + // evaluating the attestation against a policy. + Annotations map[string]interface{} `json:"annotations,omitempty"` +} + +// Builder represents the transitive closure of all the entities that are, by +// necessity, trusted to faithfully run the build and record the provenance. +type Builder struct { + // URI indicating the transitive closure of the trusted builder. + ID string `json:"id"` + + // Version numbers of components of the builder.
+ Version map[string]string `json:"version,omitempty"` + + // Dependencies used by the orchestrator that are not run within the + // workload and that do not affect the build, but might affect the + // provenance generation or security guarantees. + BuilderDependencies []ResourceDescriptor `json:"builderDependencies,omitempty"` +} + +type BuildMetadata struct { + // Identifies this particular build invocation, which can be useful for + // finding associated logs or other ad-hoc analysis. The exact meaning and + // format is defined by builder.id; by default it is treated as opaque and + // case-sensitive. The value SHOULD be globally unique. + InvocationID string `json:"invocationID,omitempty"` + + // The timestamp of when the build started. + StartedOn *time.Time `json:"startedOn,omitempty"` + + // The timestamp of when the build completed. + FinishedOn *time.Time `json:"finishedOn,omitempty"` +} diff --git a/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go b/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go index 59cba86eb..5c36dede1 100644 --- a/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go +++ b/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/util.go @@ -1,9 +1,15 @@ package in_toto import ( + "encoding/json" + "errors" "fmt" + "reflect" + "strings" ) +var ErrUnknownMetadataType = errors.New("unknown metadata type encountered: not link or layout") + /* Set represents a data structure for set operations. See `NewSet` for how to create a Set, and available Set receivers for useful set operations. @@ -145,3 +151,40 @@ func (s Set) IsSubSet(subset Set) bool { } return true } + +func loadPayload(payloadBytes []byte) (any, error) { + var payload map[string]any + if err := json.Unmarshal(payloadBytes, &payload); err != nil { + return nil, fmt.Errorf("error decoding payload: %w", err) + } + + if payload["_type"] == "link" { + var link Link + if err := checkRequiredJSONFields(payload, reflect.TypeOf(link)); err != nil { + return nil, fmt.Errorf("error decoding payload: %w", err) + } + + decoder := json.NewDecoder(strings.NewReader(string(payloadBytes))) + decoder.DisallowUnknownFields() + if err := decoder.Decode(&link); err != nil { + return nil, fmt.Errorf("error decoding payload: %w", err) + } + + return link, nil + } else if payload["_type"] == "layout" { + var layout Layout + if err := checkRequiredJSONFields(payload, reflect.TypeOf(layout)); err != nil { + return nil, fmt.Errorf("error decoding payload: %w", err) + } + + decoder := json.NewDecoder(strings.NewReader(string(payloadBytes))) + decoder.DisallowUnknownFields() + if err := decoder.Decode(&layout); err != nil { + return nil, fmt.Errorf("error decoding payload: %w", err) + } + + return layout, nil + } + + return nil, ErrUnknownMetadataType +} diff --git a/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go b/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go index 2302040f4..2564bd47e 100644 --- a/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go +++ b/src/cmd/linuxkit/vendor/github.com/in-toto/in-toto-golang/in_toto/verifylib.go @@ -12,7 +12,6 @@ import ( "io" "os" "path" - osPath "path" "path/filepath" "reflect" "regexp" @@ -23,6 +22,8 @@ import ( // ErrInspectionRunDirIsSymlink gets thrown if the runDir is a symlink var ErrInspectionRunDirIsSymlink = errors.New("runDir is a symlink. 
This is a security risk") +var ErrNotLayout = errors.New("verification workflow passed a non-layout") + /* RunInspections iteratively executes the command in the Run field of all inspections of the passed layout, creating unsigned link metadata that records @@ -41,8 +42,8 @@ If executing the inspection command fails, or if the executed command has a non-zero exit code, the first return value is an empty Metablock map and the second return value is the error. */ -func RunInspections(layout Layout, runDir string, lineNormalization bool) (map[string]Metablock, error) { - inspectionMetadata := make(map[string]Metablock) +func RunInspections(layout Layout, runDir string, lineNormalization bool, useDSSE bool) (map[string]Metadata, error) { + inspectionMetadata := make(map[string]Metadata) for _, inspection := range layout.Inspect { @@ -51,14 +52,14 @@ func RunInspections(layout Layout, runDir string, lineNormalization bool) (map[s paths = []string{runDir} } - linkMb, err := InTotoRun(inspection.Name, runDir, paths, paths, - inspection.Run, Key{}, []string{"sha256"}, nil, nil, lineNormalization) + linkEnv, err := InTotoRun(inspection.Name, runDir, paths, paths, + inspection.Run, Key{}, []string{"sha256"}, nil, nil, lineNormalization, false, useDSSE) if err != nil { return nil, err } - retVal := linkMb.Signed.(Link).ByProducts["return-value"] + retVal := linkEnv.GetPayload().(Link).ByProducts["return-value"] if retVal != float64(0) { return nil, fmt.Errorf("inspection command '%s' of inspection '%s'"+ " returned a non-zero value: %d", inspection.Run, inspection.Name, @@ -67,11 +68,11 @@ func RunInspections(layout Layout, runDir string, lineNormalization bool) (map[s // Dump inspection link to cwd using the short link name format linkName := fmt.Sprintf(LinkNameFormatShort, inspection.Name) - if err := linkMb.Dump(linkName); err != nil { + if err := linkEnv.Dump(linkName); err != nil { fmt.Printf("JSON serialization or writing failed: %s", err) } - inspectionMetadata[inspection.Name] = linkMb + inspectionMetadata[inspection.Name] = linkEnv } return inspectionMetadata, nil } @@ -80,10 +81,10 @@ func RunInspections(layout Layout, runDir string, lineNormalization bool) (map[s // type MATCH. See VerifyArtifacts for more details. func verifyMatchRule(ruleData map[string]string, srcArtifacts map[string]interface{}, srcArtifactQueue Set, - itemsMetadata map[string]Metablock) Set { + itemsMetadata map[string]Metadata) Set { consumed := NewSet() // Get destination link metadata - dstLinkMb, exists := itemsMetadata[ruleData["dstName"]] + dstLinkEnv, exists := itemsMetadata[ruleData["dstName"]] if !exists { // Destination link does not exist, rule can't consume any // artifacts @@ -94,9 +95,9 @@ func verifyMatchRule(ruleData map[string]string, var dstArtifacts map[string]interface{} switch ruleData["dstType"] { case "materials": - dstArtifacts = dstLinkMb.Signed.(Link).Materials + dstArtifacts = dstLinkEnv.GetPayload().(Link).Materials case "products": - dstArtifacts = dstLinkMb.Signed.(Link).Products + dstArtifacts = dstLinkEnv.GetPayload().(Link).Products } // cleanup paths in pattern and artifact maps @@ -140,7 +141,7 @@ func verifyMatchRule(ruleData map[string]string, // Construct corresponding destination artifact path, i.e. 
// an optional destination prefix plus the source base path - dstPath := path.Clean(osPath.Join(ruleData["dstPrefix"], srcBasePath)) + dstPath := path.Clean(path.Join(ruleData["dstPrefix"], srcBasePath)) // Try to find the corresponding destination artifact dstArtifact, exists := dstArtifacts[dstPath] @@ -180,7 +181,7 @@ DISALLOW rule to fail overall verification, if artifacts are left in the queue that should have been consumed by preceding rules. */ func VerifyArtifacts(items []interface{}, - itemsMetadata map[string]Metablock) error { + itemsMetadata map[string]Metadata) error { // Verify artifact rules for each item in the layout for _, itemI := range items { // The layout item (interface) must be a Link or an Inspection we are only @@ -207,7 +208,7 @@ func VerifyArtifacts(items []interface{}, } // Use the item's name to extract the corresponding link - srcLinkMb, exists := itemsMetadata[itemName] + srcLinkEnv, exists := itemsMetadata[itemName] if !exists { return fmt.Errorf("VerifyArtifacts could not find metadata"+ " for item '%s', got: '%s'", itemName, itemsMetadata) @@ -215,8 +216,8 @@ func VerifyArtifacts(items []interface{}, // Create shortcuts to materials and products (including hashes) reported // by the item's link, required to verify "match" rules - materials := srcLinkMb.Signed.(Link).Materials - products := srcLinkMb.Signed.(Link).Products + materials := srcLinkEnv.GetPayload().(Link).Materials + products := srcLinkEnv.GetPayload().(Link).Products // All other rules only require the material or product paths (without // hashes). We extract them from the corresponding maps and store them as @@ -364,9 +365,9 @@ Products, the first return value is an empty Metablock map and the second return value is the error. */ func ReduceStepsMetadata(layout Layout, - stepsMetadata map[string]map[string]Metablock) (map[string]Metablock, + stepsMetadata map[string]map[string]Metadata) (map[string]Metadata, error) { - stepsMetadataReduced := make(map[string]Metablock) + stepsMetadataReduced := make(map[string]Metadata) for _, step := range layout.Steps { linksPerStep, ok := stepsMetadata[step.Name] @@ -379,16 +380,16 @@ func ReduceStepsMetadata(layout Layout, // Get the first link (could be any link) for the current step, which will // serve as reference link for below comparisons var referenceKeyID string - var referenceLinkMb Metablock - for keyID, linkMb := range linksPerStep { - referenceLinkMb = linkMb + var referenceLinkEnv Metadata + for keyID, linkEnv := range linksPerStep { + referenceLinkEnv = linkEnv referenceKeyID = keyID break } // Only one link, nothing to reduce, take the reference link if len(linksPerStep) == 1 { - stepsMetadataReduced[step.Name] = referenceLinkMb + stepsMetadataReduced[step.Name] = referenceLinkEnv // Multiple links, reduce but first check } else { @@ -396,11 +397,11 @@ func ReduceStepsMetadata(layout Layout, // TODO: What should we do if there are more links, than the // threshold requires, but not all of them are equal? Right now we would // also error. 
- for keyID, linkMb := range linksPerStep { - if !reflect.DeepEqual(linkMb.Signed.(Link).Materials, - referenceLinkMb.Signed.(Link).Materials) || - !reflect.DeepEqual(linkMb.Signed.(Link).Products, - referenceLinkMb.Signed.(Link).Products) { + for keyID, linkEnv := range linksPerStep { + if !reflect.DeepEqual(linkEnv.GetPayload().(Link).Materials, + referenceLinkEnv.GetPayload().(Link).Materials) || + !reflect.DeepEqual(linkEnv.GetPayload().(Link).Products, + referenceLinkEnv.GetPayload().(Link).Products) { return nil, fmt.Errorf("link '%s' and '%s' have different"+ " artifacts", fmt.Sprintf(LinkNameFormat, step.Name, referenceKeyID), @@ -408,7 +409,7 @@ func ReduceStepsMetadata(layout Layout, } } // We haven't errored out, so we can reduce (i.e take the reference link) - stepsMetadataReduced[step.Name] = referenceLinkMb + stepsMetadataReduced[step.Name] = referenceLinkEnv } } return stepsMetadataReduced, nil @@ -421,7 +422,7 @@ command, as per the layout. Soft verification means that, in case a command does not align, a warning is issued. */ func VerifyStepCommandAlignment(layout Layout, - stepsMetadata map[string]map[string]Metablock) { + stepsMetadata map[string]map[string]Metadata) { for _, step := range layout.Steps { linksPerStep, ok := stepsMetadata[step.Name] // We should never get here, layout verification must fail earlier @@ -430,9 +431,9 @@ func VerifyStepCommandAlignment(layout Layout, "', no link metadata found.") } - for signerKeyID, linkMb := range linksPerStep { + for signerKeyID, linkEnv := range linksPerStep { expectedCommandS := strings.Join(step.ExpectedCommand, " ") - executedCommandS := strings.Join(linkMb.Signed.(Link).Command, " ") + executedCommandS := strings.Join(linkEnv.GetPayload().(Link).Command, " ") if expectedCommandS != executedCommandS { linkName := fmt.Sprintf(LinkNameFormat, step.Name, signerKeyID) @@ -502,11 +503,11 @@ return value is an empty map of Metablock maps and the second return value is the error. */ func VerifyLinkSignatureThesholds(layout Layout, - stepsMetadata map[string]map[string]Metablock, rootCertPool, intermediateCertPool *x509.CertPool) ( - map[string]map[string]Metablock, error) { + stepsMetadata map[string]map[string]Metadata, rootCertPool, intermediateCertPool *x509.CertPool) ( + map[string]map[string]Metadata, error) { // This will stores links with valid signature from an authorized functionary // for all steps - stepsMetadataVerified := make(map[string]map[string]Metablock) + stepsMetadataVerified := make(map[string]map[string]Metadata) // Try to find enough (>= threshold) links each with a valid signature from // distinct authorized functionaries for each step @@ -515,7 +516,7 @@ func VerifyLinkSignatureThesholds(layout Layout, // This will store links with valid signature from an authorized // functionary for the given step - linksPerStepVerified := make(map[string]Metablock) + linksPerStepVerified := make(map[string]Metadata) // Check if there are any links at all for the given step linksPerStep, ok := stepsMetadata[step.Name] @@ -528,12 +529,12 @@ func VerifyLinkSignatureThesholds(layout Layout, // verification passes. Only good links are stored, to verify thresholds // below. 
isAuthorizedSignature := false - for signerKeyID, linkMb := range linksPerStep { + for signerKeyID, linkEnv := range linksPerStep { for _, authorizedKeyID := range step.PubKeys { if signerKeyID == authorizedKeyID { if verifierKey, ok := layout.Keys[authorizedKeyID]; ok { - if err := linkMb.VerifySignature(verifierKey); err == nil { - linksPerStepVerified[signerKeyID] = linkMb + if err := linkEnv.VerifySignature(verifierKey); err == nil { + linksPerStepVerified[signerKeyID] = linkEnv isAuthorizedSignature = true break } @@ -544,7 +545,7 @@ func VerifyLinkSignatureThesholds(layout Layout, // If the signer's key wasn't in our step's pubkeys array, check the cert pool to // see if the key is known to us. if !isAuthorizedSignature { - sig, err := linkMb.GetSignatureForKeyID(signerKeyID) + sig, err := linkEnv.GetSignatureForKeyID(signerKeyID) if err != nil { stepErr = err continue @@ -563,13 +564,13 @@ func VerifyLinkSignatureThesholds(layout Layout, continue } - err = linkMb.VerifySignature(cert) + err = linkEnv.VerifySignature(cert) if err != nil { stepErr = err continue } - linksPerStepVerified[signerKeyID] = linkMb + linksPerStepVerified[signerKeyID] = linkEnv } } @@ -614,30 +615,30 @@ ignored. Only a preliminary threshold check is performed, that is, if there aren't at least Threshold links for any given step, the first return value is an empty map of Metablock maps and the second return value is the error. */ -func LoadLinksForLayout(layout Layout, linkDir string) (map[string]map[string]Metablock, error) { - stepsMetadata := make(map[string]map[string]Metablock) +func LoadLinksForLayout(layout Layout, linkDir string) (map[string]map[string]Metadata, error) { + stepsMetadata := make(map[string]map[string]Metadata) for _, step := range layout.Steps { - linksPerStep := make(map[string]Metablock) + linksPerStep := make(map[string]Metadata) // Since we can verify against certificates belonging to a CA, we need to // load any possible links - linkFiles, err := filepath.Glob(osPath.Join(linkDir, fmt.Sprintf(LinkGlobFormat, step.Name))) + linkFiles, err := filepath.Glob(path.Join(linkDir, fmt.Sprintf(LinkGlobFormat, step.Name))) if err != nil { return nil, err } for _, linkPath := range linkFiles { - var linkMb Metablock - if err := linkMb.Load(linkPath); err != nil { + linkEnv, err := LoadMetadata(linkPath) + if err != nil { continue } // To get the full key from the metadata's signatures, we have to check // for one with the same short id... signerShortKeyID := strings.TrimSuffix(strings.TrimPrefix(filepath.Base(linkPath), step.Name+"."), ".link") - for _, sig := range linkMb.Signatures { + for _, sig := range linkEnv.Sigs() { if strings.HasPrefix(sig.KeyID, signerShortKeyID) { - linksPerStep[sig.KeyID] = linkMb + linksPerStep[sig.KeyID] = linkEnv break } } @@ -677,14 +678,14 @@ Signatures and keys are associated by key id. If the key map is empty, or the Metablock's Signature field does not have a signature for one or more of the passed keys, or a matching signature is invalid, an error is returned. 
*/ -func VerifyLayoutSignatures(layoutMb Metablock, +func VerifyLayoutSignatures(layoutEnv Metadata, layoutKeys map[string]Key) error { if len(layoutKeys) < 1 { return fmt.Errorf("layout verification requires at least one key") } for _, key := range layoutKeys { - if err := layoutMb.VerifySignature(key); err != nil { + if err := layoutEnv.VerifySignature(key); err != nil { return err } } @@ -700,29 +701,35 @@ NOTE: The assumption is that the steps mentioned in the layout are to be performed sequentially. So, the first step mentioned in the layout denotes what comes into the supply chain and the last step denotes what goes out. */ -func GetSummaryLink(layout Layout, stepsMetadataReduced map[string]Metablock, - stepName string) (Metablock, error) { +func GetSummaryLink(layout Layout, stepsMetadataReduced map[string]Metadata, + stepName string, useDSSE bool) (Metadata, error) { var summaryLink Link - var result Metablock if len(layout.Steps) > 0 { firstStepLink := stepsMetadataReduced[layout.Steps[0].Name] lastStepLink := stepsMetadataReduced[layout.Steps[len(layout.Steps)-1].Name] - summaryLink.Materials = firstStepLink.Signed.(Link).Materials + summaryLink.Materials = firstStepLink.GetPayload().(Link).Materials summaryLink.Name = stepName - summaryLink.Type = firstStepLink.Signed.(Link).Type + summaryLink.Type = firstStepLink.GetPayload().(Link).Type - summaryLink.Products = lastStepLink.Signed.(Link).Products - summaryLink.ByProducts = lastStepLink.Signed.(Link).ByProducts + summaryLink.Products = lastStepLink.GetPayload().(Link).Products + summaryLink.ByProducts = lastStepLink.GetPayload().(Link).ByProducts // Using the last command of the sublayout as the command // of the summary link can be misleading. Is it necessary to // include all the commands executed as part of sublayout? - summaryLink.Command = lastStepLink.Signed.(Link).Command + summaryLink.Command = lastStepLink.GetPayload().(Link).Command } - result.Signed = summaryLink + if useDSSE { + env := &Envelope{} + if err := env.SetPayload(summaryLink); err != nil { + return nil, err + } - return result, nil + return env, nil + } + + return &Metablock{Signed: summaryLink}, nil } /* @@ -731,11 +738,11 @@ so, recursively resolves it and replaces it with a summary link summarizing the steps carried out in the sublayout. */ func VerifySublayouts(layout Layout, - stepsMetadataVerified map[string]map[string]Metablock, - superLayoutLinkPath string, intermediatePems [][]byte, lineNormalization bool) (map[string]map[string]Metablock, error) { + stepsMetadataVerified map[string]map[string]Metadata, + superLayoutLinkPath string, intermediatePems [][]byte, lineNormalization bool) (map[string]map[string]Metadata, error) { for stepName, linkData := range stepsMetadataVerified { for keyID, metadata := range linkData { - if _, ok := metadata.Signed.(Layout); ok { + if _, ok := metadata.GetPayload().(Layout); ok { layoutKeys := make(map[string]Key) layoutKeys[keyID] = layout.Keys[keyID] @@ -861,55 +868,60 @@ Metablock object. NOTE: Artifact rules of type "create", "modify" and "delete" are currently not supported. 
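// Editorial example, not part of the vendored diff: end-to-end verification with the
// Metadata-based API introduced in this version. The layout path, link directory,
// summary-link name and key are placeholders; assumes the in_toto import.
func verifySupplyChain(ownerKey in_toto.Key) (in_toto.Metadata, error) {
	layoutEnv, err := in_toto.LoadMetadata("root.layout") // DSSE envelope or Metablock
	if err != nil {
		return nil, err
	}
	return in_toto.InTotoVerify(
		layoutEnv,
		map[string]in_toto.Key{ownerKey.KeyID: ownerKey}, // keys trusted to sign the layout
		"links/",        // directory containing the *.link metadata
		"final-product", // name recorded on the returned summary link
		nil,             // parameter substitutions
		nil,             // intermediate PEMs for certificate-based functionaries
		false,           // line-ending normalization
	)
}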
*/ -func InTotoVerify(layoutMb Metablock, layoutKeys map[string]Key, +func InTotoVerify(layoutEnv Metadata, layoutKeys map[string]Key, linkDir string, stepName string, parameterDictionary map[string]string, intermediatePems [][]byte, lineNormalization bool) ( - Metablock, error) { - - var summaryLink Metablock - var err error + Metadata, error) { // Verify root signatures - if err := VerifyLayoutSignatures(layoutMb, layoutKeys); err != nil { - return summaryLink, err + if err := VerifyLayoutSignatures(layoutEnv, layoutKeys); err != nil { + return nil, err } - // Extract the layout from its Metablock container (for further processing) - layout := layoutMb.Signed.(Layout) + useDSSE := false + if _, ok := layoutEnv.(*Envelope); ok { + useDSSE = true + } + + // Extract the layout from its Metadata container (for further processing) + layout, ok := layoutEnv.GetPayload().(Layout) + if !ok { + return nil, ErrNotLayout + } // Verify layout expiration if err := VerifyLayoutExpiration(layout); err != nil { - return summaryLink, err + return nil, err } // Substitute parameters in layout - layout, err = SubstituteParameters(layout, parameterDictionary) + layout, err := SubstituteParameters(layout, parameterDictionary) if err != nil { - return summaryLink, err + return nil, err } rootCertPool, intermediateCertPool, err := LoadLayoutCertificates(layout, intermediatePems) if err != nil { - return summaryLink, err + return nil, err } // Load links for layout stepsMetadata, err := LoadLinksForLayout(layout, linkDir) if err != nil { - return summaryLink, err + return nil, err } // Verify link signatures stepsMetadataVerified, err := VerifyLinkSignatureThesholds(layout, stepsMetadata, rootCertPool, intermediateCertPool) if err != nil { - return summaryLink, err + return nil, err } // Verify and resolve sublayouts stepsSublayoutVerified, err := VerifySublayouts(layout, stepsMetadataVerified, linkDir, intermediatePems, lineNormalization) if err != nil { - return summaryLink, err + return nil, err } // Verify command alignment (WARNING only) @@ -922,18 +934,18 @@ func InTotoVerify(layoutMb Metablock, layoutKeys map[string]Key, stepsMetadataReduced, err := ReduceStepsMetadata(layout, stepsSublayoutVerified) if err != nil { - return summaryLink, err + return nil, err } // Verify artifact rules if err = VerifyArtifacts(layout.stepsAsInterfaceSlice(), stepsMetadataReduced); err != nil { - return summaryLink, err + return nil, err } - inspectionMetadata, err := RunInspections(layout, "", lineNormalization) + inspectionMetadata, err := RunInspections(layout, "", lineNormalization, useDSSE) if err != nil { - return summaryLink, err + return nil, err } // Add steps metadata to inspection metadata, because inspection artifact @@ -944,51 +956,48 @@ func InTotoVerify(layoutMb Metablock, layoutKeys map[string]Key, if err = VerifyArtifacts(layout.inspectAsInterfaceSlice(), inspectionMetadata); err != nil { - return summaryLink, err + return nil, err } - summaryLink, err = GetSummaryLink(layout, stepsMetadataReduced, stepName) + summaryLink, err := GetSummaryLink(layout, stepsMetadataReduced, stepName, useDSSE) if err != nil { - return summaryLink, err + return nil, err } return summaryLink, nil } /* -InTotoVerifyWithDirectory provides the same functionality as IntotoVerify, but +InTotoVerifyWithDirectory provides the same functionality as InTotoVerify, but adds the possibility to select a local directory from where the inspections are run. 
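// Editorial example, not part of the vendored diff, continuing the sketch above:
// InTotoVerifyWithDirectory takes the same arguments plus a dedicated, writable run
// directory in which inspection commands are executed (placeholder paths).
summary, err := in_toto.InTotoVerifyWithDirectory(layoutEnv,
	map[string]in_toto.Key{ownerKey.KeyID: ownerKey},
	"links/", "/tmp/in-toto-inspections", "final-product", nil, nil, false)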
*/ -func InTotoVerifyWithDirectory(layoutMb Metablock, layoutKeys map[string]Key, +func InTotoVerifyWithDirectory(layoutEnv Metadata, layoutKeys map[string]Key, linkDir string, runDir string, stepName string, parameterDictionary map[string]string, intermediatePems [][]byte, lineNormalization bool) ( - Metablock, error) { - - var summaryLink Metablock - var err error + Metadata, error) { // runDir sanity checks // check if path exists info, err := os.Stat(runDir) if err != nil { - return Metablock{}, err + return nil, err } // check if runDir is a symlink if info.Mode()&os.ModeSymlink == os.ModeSymlink { - return Metablock{}, ErrInspectionRunDirIsSymlink + return nil, ErrInspectionRunDirIsSymlink } // check if runDir is writable and a directory err = isWritable(runDir) if err != nil { - return Metablock{}, err + return nil, err } // check if runDir is empty (we do not want to overwrite files) // We abuse File.Readdirnames for this action. f, err := os.Open(runDir) if err != nil { - return Metablock{}, err + return nil, err } defer f.Close() // We use Readdirnames(1) for performance reasons, one child node @@ -996,55 +1005,63 @@ func InTotoVerifyWithDirectory(layoutMb Metablock, layoutKeys map[string]Key, _, err = f.Readdirnames(1) // if io.EOF gets returned as error the directory is empty if err == io.EOF { - return Metablock{}, err + return nil, err } err = f.Close() if err != nil { - return Metablock{}, err + return nil, err } // Verify root signatures - if err := VerifyLayoutSignatures(layoutMb, layoutKeys); err != nil { - return summaryLink, err + if err := VerifyLayoutSignatures(layoutEnv, layoutKeys); err != nil { + return nil, err } - // Extract the layout from its Metablock container (for further processing) - layout := layoutMb.Signed.(Layout) + useDSSE := false + if _, ok := layoutEnv.(*Envelope); ok { + useDSSE = true + } + + // Extract the layout from its Metadata container (for further processing) + layout, ok := layoutEnv.GetPayload().(Layout) + if !ok { + return nil, ErrNotLayout + } // Verify layout expiration if err := VerifyLayoutExpiration(layout); err != nil { - return summaryLink, err + return nil, err } // Substitute parameters in layout layout, err = SubstituteParameters(layout, parameterDictionary) if err != nil { - return summaryLink, err + return nil, err } rootCertPool, intermediateCertPool, err := LoadLayoutCertificates(layout, intermediatePems) if err != nil { - return summaryLink, err + return nil, err } // Load links for layout stepsMetadata, err := LoadLinksForLayout(layout, linkDir) if err != nil { - return summaryLink, err + return nil, err } // Verify link signatures stepsMetadataVerified, err := VerifyLinkSignatureThesholds(layout, stepsMetadata, rootCertPool, intermediateCertPool) if err != nil { - return summaryLink, err + return nil, err } // Verify and resolve sublayouts stepsSublayoutVerified, err := VerifySublayouts(layout, stepsMetadataVerified, linkDir, intermediatePems, lineNormalization) if err != nil { - return summaryLink, err + return nil, err } // Verify command alignment (WARNING only) @@ -1057,18 +1074,18 @@ func InTotoVerifyWithDirectory(layoutMb Metablock, layoutKeys map[string]Key, stepsMetadataReduced, err := ReduceStepsMetadata(layout, stepsSublayoutVerified) if err != nil { - return summaryLink, err + return nil, err } // Verify artifact rules if err = VerifyArtifacts(layout.stepsAsInterfaceSlice(), stepsMetadataReduced); err != nil { - return summaryLink, err + return nil, err } - inspectionMetadata, err := RunInspections(layout, 
runDir, lineNormalization) + inspectionMetadata, err := RunInspections(layout, runDir, lineNormalization, useDSSE) if err != nil { - return summaryLink, err + return nil, err } // Add steps metadata to inspection metadata, because inspection artifact @@ -1079,12 +1096,12 @@ func InTotoVerifyWithDirectory(layoutMb Metablock, layoutKeys map[string]Key, if err = VerifyArtifacts(layout.inspectAsInterfaceSlice(), inspectionMetadata); err != nil { - return summaryLink, err + return nil, err } - summaryLink, err = GetSummaryLink(layout, stepsMetadataReduced, stepName) + summaryLink, err := GetSummaryLink(layout, stepsMetadataReduced, stepName, useDSSE) if err != nil { - return summaryLink, err + return nil, err } return summaryLink, nil diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/README.md b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/README.md index de264c85a..244ee19c4 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/README.md +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/README.md @@ -14,8 +14,34 @@ This package provides various compression algorithms. [![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) [![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) +# package usage + +Use `go get github.com/klauspost/compress@latest` to add it to your project. + +This package will support the current Go version and 2 versions back. + +* Use the `nounsafe` tag to disable all use of the "unsafe" package. +* Use the `noasm` tag to disable all assembly across packages. + +Use the links above for more information on each. + # changelog +* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0) + * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036 + * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028 + * flate: Simplify L4-6 loading https://github.com/klauspost/compress/pull/1043 + * flate: Simplify matchlen (remove asm) https://github.com/klauspost/compress/pull/1045 + * s2: Improve small block compression speed w/o asm https://github.com/klauspost/compress/pull/1048 + * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049 + * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050 + +* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11) + * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017 + * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014 + * gzhttp: No content-type on no body response code by @juliens in https://github.com/klauspost/compress/pull/1011 + * gzhttp: Do not set the content-type when response has no body by @kevinpollet in https://github.com/klauspost/compress/pull/1013 + * Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978 * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 @@ -65,9 +91,9 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Fix rare *CORRUPTION* output in "best" mode. 
See https://github.com/klauspost/compress/pull/876 * Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) - * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 + * s2: Fix S2 "best" dictionary wrong encoding https://github.com/klauspost/compress/pull/871 * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 - * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 + * s2: Fix EstimateBlockSize on 6&7 length input https://github.com/klauspost/compress/pull/867 * Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 @@ -124,7 +150,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp See changes to v1.15.x * Jan 21st, 2023 (v1.15.15) - * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 + * deflate: Improve level 7-9 https://github.com/klauspost/compress/pull/739 * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 @@ -167,7 +193,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 - * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 + * zstd: Allow single segments up to "max decoded size" https://github.com/klauspost/compress/pull/643 * July 13, 2022 (v1.15.8) @@ -209,7 +235,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 * huff0: Do not check max size when reading table. 
https://github.com/klauspost/compress/pull/586 - * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 + * flate: Inplace hashing for level 7-9 https://github.com/klauspost/compress/pull/590 * May 11, 2022 (v1.15.4) @@ -236,12 +262,12 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) * Mar 3, 2022 (v1.15.0) - * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) - * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) + * zstd: Refactor decoder [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines [#505](https://github.com/klauspost/compress/pull/505) * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) - * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) - * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) - * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) + * flate: Inline literal emission [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional [#510](https://github.com/klauspost/compress/pull/510) Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. @@ -258,7 +284,7 @@ While the release has been extensively tested, it is recommended to testing when * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 - * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + * huff0: Use static decompression buffer up to 30% faster [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) * Feb 17, 2022 (v1.14.3) * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) @@ -565,12 +591,14 @@ While the release has been extensively tested, it is recommended to testing when The packages are drop-in replacements for standard libraries. 
Simply replace the import path to use them: -| old import | new import | Documentation -|--------------------|-----------------------------------------|--------------------| -| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) -| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) -| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) -| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) +Typical speed is about 2x of the standard library packages. + +| old import | new import | Documentation | +|------------------|---------------------------------------|-------------------------------------------------------------------------| +| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) | +| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) | +| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) | +| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) | * Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). @@ -625,84 +653,6 @@ This will only use up to 4KB in memory when the writer is idle. Compression is almost always worse than the fastest compression level and each write will allocate (a little) memory. -# Performance Update 2018 - -It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. - -The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. - -The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input. - -The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. 
[`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). - - -## Overall differences. - -There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. - -The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library. - -This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression. - -There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. - -## Web Content - -This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS. - -Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big. - -Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case. - -## Object files - -This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible. - -The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression. - -The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively. - -## Highly Compressible File - -This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc. - -It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression. - -So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground". - -## Medium-High Compressible - -This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams. - -We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. 
- -## Medium Compressible - -I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. - -The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. - - -## Un-compressible Content - -This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections. - - -## Huffman only compression - -This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character. - -This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). - -Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. - -The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). - -The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup. - -For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). - -This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. # Other packages diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/fast_encoder.go index c8124b5c4..0e8b1630c 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -6,8 +6,10 @@ package flate import ( - "encoding/binary" "fmt" + "math/bits" + + "github.com/klauspost/compress/internal/le" ) type fastEnc interface { @@ -58,11 +60,11 @@ const ( ) func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[i:]) + return le.Load32(b, i) } func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[i:]) + return le.Load64(b, i) } type tableEntry struct { @@ -134,8 +136,8 @@ func hashLen(u uint64, length, mls uint8) uint32 { // matchlen will return the match length between offsets and t in src. // The maximum length returned is maxMatchLength - 4. // It is assumed that s > t, that t >=0 and s < len(src). 
-func (e *fastGen) matchlen(s, t int32, src []byte) int32 { - if debugDecode { +func (e *fastGen) matchlen(s, t int, src []byte) int32 { + if debugDeflate { if t >= s { panic(fmt.Sprint("t >=s:", t, s)) } @@ -149,18 +151,34 @@ func (e *fastGen) matchlen(s, t int32, src []byte) int32 { panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) } } - s1 := int(s) + maxMatchLength - 4 - if s1 > len(src) { - s1 = len(src) + s1 := min(s+maxMatchLength-4, len(src)) + left := s1 - s + n := int32(0) + for left >= 8 { + diff := le.Load64(src, s) ^ le.Load64(src, t) + if diff != 0 { + return n + int32(bits.TrailingZeros64(diff)>>3) + } + s += 8 + t += 8 + n += 8 + left -= 8 } - // Extend the match to be as long as possible. - return int32(matchLen(src[s:s1], src[t:])) + a := src[s:s1] + b := src[t:] + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n } // matchlenLong will return the match length between offsets and t in src. // It is assumed that s > t, that t >=0 and s < len(src). -func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { +func (e *fastGen) matchlenLong(s, t int, src []byte) int32 { if debugDeflate { if t >= s { panic(fmt.Sprint("t >=s:", t, s)) @@ -176,7 +194,28 @@ func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { } } // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) + left := len(src) - s + n := int32(0) + for left >= 8 { + diff := le.Load64(src, s) ^ le.Load64(src, t) + if diff != 0 { + return n + int32(bits.TrailingZeros64(diff)>>3) + } + s += 8 + t += 8 + n += 8 + left -= 8 + } + + a := src[s:] + b := src[t:] + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n } // Reset the encoding table. diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index f70594c34..afdc8c053 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -5,10 +5,11 @@ package flate import ( - "encoding/binary" "fmt" "io" "math" + + "github.com/klauspost/compress/internal/le" ) const ( @@ -438,7 +439,7 @@ func (w *huffmanBitWriter) writeOutBits() { n := w.nbytes // We over-write, but faster... 
- binary.LittleEndian.PutUint64(w.bytes[n:], bits) + le.Store64(w.bytes[n:], bits) n += 6 if n >= bufferFlushSize { @@ -854,7 +855,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits bits >>= 48 nbits -= 48 @@ -882,7 +883,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits bits >>= 48 nbits -= 48 @@ -905,7 +906,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= uint64(extraLength) << (nbits & 63) nbits += extraLengthBits if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits bits >>= 48 nbits -= 48 @@ -931,7 +932,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits bits >>= 48 nbits -= 48 @@ -953,7 +954,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) nbits += uint8(offsetComb) if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits bits >>= 48 nbits -= 48 @@ -1107,7 +1108,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // We must have at least 48 bits free. if nbits >= 8 { n := nbits >> 3 - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) bits >>= (n * 8) & 63 nbits -= n * 8 nbytes += n @@ -1136,7 +1137,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // Remaining... 
for _, t := range input { if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits bits >>= 48 nbits -= 48 diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level1.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level1.go index 703b9a89a..c3581a342 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level1.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level1.go @@ -1,9 +1,9 @@ package flate import ( - "encoding/binary" "fmt" - "math/bits" + + "github.com/klauspost/compress/internal/le" ) // fastGen maintains the table for matches, @@ -77,6 +77,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { nextS := s var candidate tableEntry + var t int32 for { nextHash := hashLen(cv, tableBits, hashBytes) candidate = e.table[nextHash] @@ -88,9 +89,8 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { now := load6432(src, nextS) e.table[nextHash] = tableEntry{offset: s + e.cur} nextHash = hashLen(now, tableBits, hashBytes) - - offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + t = candidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { e.table[nextHash] = tableEntry{offset: nextS + e.cur} break } @@ -103,8 +103,8 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { now >>= 8 e.table[nextHash] = tableEntry{offset: s + e.cur} - offset = s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + t = candidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { e.table[nextHash] = tableEntry{offset: nextS + e.cur} break } @@ -120,36 +120,10 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { // literal bytes prior to s. // Extend the 4-byte match as long as possible. - t := candidate.offset - e.cur - var l = int32(4) - if false { - l = e.matchlenLong(s+4, t+4, src) + 4 - } else { - // inlined: - a := src[s+4:] - b := src[t+4:] - for len(a) >= 8 { - if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { - l += int32(bits.TrailingZeros64(diff) >> 3) - break - } - l += 8 - a = a[8:] - b = b[8:] - } - if len(a) < 8 { - b = b[:len(a)] - for i := range a { - if a[i] != b[i] { - break - } - l++ - } - } - } + l := e.matchlenLong(int(s+4), int(t+4), src) + 4 // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + for t > 0 && s > nextEmit && le.Load8(src, t-1) == le.Load8(src, s-1) { s-- t-- l++ @@ -221,8 +195,8 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { candidate = e.table[currHash] e.table[currHash] = tableEntry{offset: o + 2} - offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { + t = candidate.offset - e.cur + if s-t > maxMatchOffset || uint32(x) != load3232(src, t) { cv = x >> 8 s++ break diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level2.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level2.go index 876dfbe30..c8d047f2d 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level2.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level2.go @@ -126,7 +126,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { // Extend the 4-byte match as long as possible. 
t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 + l := e.matchlenLong(int(s+4), int(t+4), src) + 4 // Extend backwards for t > 0 && s > nextEmit && src[t-1] == src[s-1] { diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level3.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level3.go index 7aa2b72a1..33f9fb152 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level3.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level3.go @@ -135,7 +135,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { // Extend the 4-byte match as long as possible. // t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 + l := e.matchlenLong(int(s+4), int(t+4), src) + 4 // Extend backwards for t > 0 && s > nextEmit && src[t-1] == src[s-1] { diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level4.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level4.go index 23c08b325..88509e197 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level4.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level4.go @@ -98,19 +98,19 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { e.bTable[nextHashL] = entry t = lCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // We got a long match. Use that. break } t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Found a 4 match... lCandidate = e.bTable[hash7(next, tableBits)] // If the next long is a candidate, check if we should use that instead... - lOff := nextS - (lCandidate.offset - e.cur) - if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) { + lOff := lCandidate.offset - e.cur + if nextS-lOff < maxMatchOffset && load3232(src, lOff) == uint32(next) { l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) if l2 > l1 { s = nextS @@ -127,7 +127,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { // them as literal bytes. // Extend the 4-byte match as long as possible. 
- l := e.matchlenLong(s+4, t+4, src) + 4 + l := e.matchlenLong(int(s+4), int(t+4), src) + 4 // Extend backwards for t > 0 && s > nextEmit && src[t-1] == src[s-1] { diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level5.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level5.go index 1f61ec182..6e5c21502 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level5.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level5.go @@ -111,16 +111,16 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { t = lCandidate.Cur.offset - e.cur if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + if uint32(cv) == load3232(src, t) { // Store the next match e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) { + l = e.matchlen(int(s+4), int(t+4), src) + 4 + ml1 := e.matchlen(int(s+4), int(t2+4), src) + 4 if ml1 > l { t = t2 l = ml1 @@ -130,7 +130,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { break } t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Store the next match e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] @@ -140,9 +140,9 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { } t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Found a 4 match... - l = e.matchlen(s+4, t+4, src) + 4 + l = e.matchlen(int(s+4), int(t+4), src) + 4 lCandidate = e.bTable[nextHashL] // Store the next match @@ -153,8 +153,8 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { // If the next long is a candidate, use that... t2 := lCandidate.Cur.offset - e.cur if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 + if load3232(src, t2) == uint32(next) { + ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4 if ml > l { t = t2 s = nextS @@ -164,8 +164,8 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { } // If the previous long is a candidate, use that... t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 + if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) { + ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4 if ml > l { t = t2 s = nextS @@ -185,9 +185,9 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { if l == 0 { // Extend the 4-byte match as long as possible. - l = e.matchlenLong(s+4, t+4, src) + 4 + l = e.matchlenLong(int(s+4), int(t+4), src) + 4 } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) + l += e.matchlenLong(int(s+l), int(t+l), src) } // Try to locate a better match by checking the end of best match... 
@@ -203,7 +203,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { s2 := s + skipBeginning off := s2 - t2 if t2 >= 0 && off < maxMatchOffset && off > 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { + if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l { t = t2 l = l2 s = s2 @@ -423,14 +423,14 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { t = lCandidate.Cur.offset - e.cur if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + if uint32(cv) == load3232(src, t) { // Store the next match e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) { l = e.matchlen(s+4, t+4, src) + 4 ml1 := e.matchlen(s+4, t2+4, src) + 4 if ml1 > l { @@ -442,7 +442,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { break } t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Store the next match e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] @@ -452,7 +452,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { } t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Found a 4 match... l = e.matchlen(s+4, t+4, src) + 4 lCandidate = e.bTable[nextHashL] @@ -465,7 +465,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { // If the next long is a candidate, use that... t2 := lCandidate.Cur.offset - e.cur if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + if load3232(src, t2) == uint32(next) { ml := e.matchlen(nextS+4, t2+4, src) + 4 if ml > l { t = t2 @@ -476,7 +476,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { } // If the previous long is a candidate, use that... t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) { ml := e.matchlen(nextS+4, t2+4, src) + 4 if ml > l { t = t2 diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level6.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level6.go index f1e9d98fa..96f5bb430 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level6.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/level6.go @@ -113,7 +113,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { t = lCandidate.Cur.offset - e.cur if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + if uint32(cv) == load3232(src, t) { // Long candidate matches at least 4 bytes. // Store the next match @@ -123,9 +123,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { // Check the previous long candidate as well. 
t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) { + l = e.matchlen(int(s+4), int(t+4), src) + 4 + ml1 := e.matchlen(int(s+4), int(t2+4), src) + 4 if ml1 > l { t = t2 l = ml1 @@ -136,7 +136,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { } // Current value did not match, but check if previous long value does. t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Store the next match e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] @@ -146,9 +146,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { } t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Found a 4 match... - l = e.matchlen(s+4, t+4, src) + 4 + l = e.matchlen(int(s+4), int(t+4), src) + 4 // Look up next long candidate (at nextS) lCandidate = e.bTable[nextHashL] @@ -162,7 +162,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { const repOff = 1 t2 := s - repeat + repOff if load3232(src, t2) == uint32(cv>>(8*repOff)) { - ml := e.matchlen(s+4+repOff, t2+4, src) + 4 + ml := e.matchlen(int(s+4+repOff), int(t2+4), src) + 4 if ml > l { t = t2 l = ml @@ -175,8 +175,8 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { // If the next long is a candidate, use that... t2 = lCandidate.Cur.offset - e.cur if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 + if load3232(src, t2) == uint32(next) { + ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4 if ml > l { t = t2 s = nextS @@ -186,8 +186,8 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { } // If the previous long is a candidate, use that... t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 + if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) { + ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4 if ml > l { t = t2 s = nextS @@ -207,9 +207,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { // Extend the 4-byte match as long as possible. if l == 0 { - l = e.matchlenLong(s+4, t+4, src) + 4 + l = e.matchlenLong(int(s+4), int(t+4), src) + 4 } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) + l += e.matchlenLong(int(s+l), int(t+l), src) } // Try to locate a better match by checking the end-of-match... 
@@ -227,7 +227,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { off := s2 - t2 if off < maxMatchOffset { if off > 0 && t2 >= 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { + if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l { t = t2 l = l2 s = s2 @@ -237,7 +237,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { t2 = eLong.Prev.offset - e.cur - l + skipBeginning off := s2 - t2 if off > 0 && off < maxMatchOffset && t2 >= 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { + if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l { t = t2 l = l2 s = s2 diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go deleted file mode 100644 index 4bd388584..000000000 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package flate - -// matchLen returns how many bytes match in a and b -// -// It assumes that: -// -// len(a) <= len(b) and len(a) > 0 -// -//go:noescape -func matchLen(a []byte, b []byte) int diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s deleted file mode 100644 index 0782b86e3..000000000 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s +++ /dev/null @@ -1,66 +0,0 @@ -// Copied from S2 implementation. - -//go:build !appengine && !noasm && gc && !noasm - -#include "textflag.h" - -// func matchLen(a []byte, b []byte) int -TEXT ·matchLen(SB), NOSPLIT, $0-56 - MOVQ a_base+0(FP), AX - MOVQ b_base+24(FP), CX - MOVQ a_len+8(FP), DX - - // matchLen - XORL SI, SI - CMPL DX, $0x08 - JB matchlen_match4_standalone - -matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - JZ matchlen_loop_standalone - -#ifdef GOAMD64_v3 - TZCNTQ BX, BX -#else - BSFQ BX, BX -#endif - SHRL $0x03, BX - LEAL (SI)(BX*1), SI - JMP gen_match_len_end - -matchlen_loop_standalone: - LEAL -8(DX), DX - LEAL 8(SI), SI - CMPL DX, $0x08 - JAE matchlen_loopback_standalone - -matchlen_match4_standalone: - CMPL DX, $0x04 - JB matchlen_match2_standalone - MOVL (AX)(SI*1), BX - CMPL (CX)(SI*1), BX - JNE matchlen_match2_standalone - LEAL -4(DX), DX - LEAL 4(SI), SI - -matchlen_match2_standalone: - CMPL DX, $0x02 - JB matchlen_match1_standalone - MOVW (AX)(SI*1), BX - CMPW (CX)(SI*1), BX - JNE matchlen_match1_standalone - LEAL -2(DX), DX - LEAL 2(SI), SI - -matchlen_match1_standalone: - CMPL DX, $0x01 - JB gen_match_len_end - MOVB (AX)(SI*1), BL - CMPB (CX)(SI*1), BL - JNE gen_match_len_end - INCL SI - -gen_match_len_end: - MOVQ SI, ret+48(FP) - RET diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/matchlen_generic.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/matchlen_generic.go index ad5cd814b..6149384aa 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/matchlen_generic.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/matchlen_generic.go @@ -1,27 +1,29 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - // Copyright 2019+ Klaus Post. All rights reserved. // License information can be found in the LICENSE file. 
package flate import ( - "encoding/binary" "math/bits" + + "github.com/klauspost/compress/internal/le" ) // matchLen returns the maximum common prefix length of a and b. // a must be the shortest of the two. func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + left := len(a) + for left >= 8 { + diff := le.Load64(a, n) ^ le.Load64(b, n) if diff != 0 { return n + bits.TrailingZeros64(diff)>>3 } n += 8 + left -= 8 } + a = a[n:] + b = b[n:] for i := range a { if a[i] != b[i] { break @@ -29,5 +31,4 @@ func matchLen(a, b []byte) (n int) { n++ } return n - } diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/stateless.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/stateless.go index f3d4139ef..13b9b100d 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/flate/stateless.go @@ -4,6 +4,8 @@ import ( "io" "math" "sync" + + "github.com/klauspost/compress/internal/le" ) const ( @@ -152,18 +154,11 @@ func hashSL(u uint32) uint32 { } func load3216(b []byte, i int16) uint32 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:4] - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + return le.Load32(b, i) } func load6416(b []byte, i int16) uint64 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:8] - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + return le.Load64(b, i) } func statelessEnc(dst *tokens, src []byte, startAt int16) { diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/huff0/bitreader.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/huff0/bitreader.go index e36d9742f..bfc7a523d 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -6,10 +6,11 @@ package huff0 import ( - "encoding/binary" "errors" "fmt" "io" + + "github.com/klauspost/compress/internal/le" ) // bitReader reads a bitstream in reverse. @@ -46,7 +47,7 @@ func (b *bitReaderBytes) init(in []byte) error { return nil } -// peekBitsFast requires that at least one bit is requested every time. +// peekByteFast requires that at least one byte is requested every time. // There are no checks if the buffer is filled. func (b *bitReaderBytes) peekByteFast() uint8 { got := uint8(b.value >> 56) @@ -66,8 +67,7 @@ func (b *bitReaderBytes) fillFast() { } // 2 bounds checks. - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + low := le.Load32(b.in, b.off-4) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 b.off -= 4 @@ -76,7 +76,7 @@ func (b *bitReaderBytes) fillFast() { // fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. func (b *bitReaderBytes) fillFastStart() { // Do single re-slice to avoid bounds checks. 
- b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.value = le.Load64(b.in, b.off-8) b.bitsRead = 0 b.off -= 8 } @@ -86,9 +86,8 @@ func (b *bitReaderBytes) fill() { if b.bitsRead < 32 { return } - if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + if b.off >= 4 { + low := le.Load32(b.in, b.off-4) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 b.off -= 4 @@ -175,9 +174,7 @@ func (b *bitReaderShifted) fillFast() { return } - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + low := le.Load32(b.in, b.off-4) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 b.off -= 4 @@ -185,8 +182,7 @@ func (b *bitReaderShifted) fillFast() { // fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. func (b *bitReaderShifted) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.value = le.Load64(b.in, b.off-8) b.bitsRead = 0 b.off -= 8 } @@ -197,8 +193,7 @@ func (b *bitReaderShifted) fill() { return } if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + low := le.Load32(b.in, b.off-4) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 b.off -= 4 diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/internal/le/le.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/internal/le/le.go new file mode 100644 index 000000000..e54909e16 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/internal/le/le.go @@ -0,0 +1,5 @@ +package le + +type Indexer interface { + int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 +} diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go new file mode 100644 index 000000000..0cfb5c0e2 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go @@ -0,0 +1,42 @@ +//go:build !(amd64 || arm64 || ppc64le || riscv64) || nounsafe || purego || appengine + +package le + +import ( + "encoding/binary" +) + +// Load8 will load from b at index i. +func Load8[I Indexer](b []byte, i I) byte { + return b[i] +} + +// Load16 will load from b at index i. +func Load16[I Indexer](b []byte, i I) uint16 { + return binary.LittleEndian.Uint16(b[i:]) +} + +// Load32 will load from b at index i. +func Load32[I Indexer](b []byte, i I) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +// Load64 will load from b at index i. +func Load64[I Indexer](b []byte, i I) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +// Store16 will store v at b. +func Store16(b []byte, v uint16) { + binary.LittleEndian.PutUint16(b, v) +} + +// Store32 will store v at b. +func Store32(b []byte, v uint32) { + binary.LittleEndian.PutUint32(b, v) +} + +// Store64 will store v at b. 
+func Store64(b []byte, v uint64) { + binary.LittleEndian.PutUint64(b, v) +} diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go new file mode 100644 index 000000000..ada45cd90 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go @@ -0,0 +1,55 @@ +// We enable 64 bit LE platforms: + +//go:build (amd64 || arm64 || ppc64le || riscv64) && !nounsafe && !purego && !appengine + +package le + +import ( + "unsafe" +) + +// Load8 will load from b at index i. +func Load8[I Indexer](b []byte, i I) byte { + //return binary.LittleEndian.Uint16(b[i:]) + //return *(*uint16)(unsafe.Pointer(&b[i])) + return *(*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Load16 will load from b at index i. +func Load16[I Indexer](b []byte, i I) uint16 { + //return binary.LittleEndian.Uint16(b[i:]) + //return *(*uint16)(unsafe.Pointer(&b[i])) + return *(*uint16)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Load32 will load from b at index i. +func Load32[I Indexer](b []byte, i I) uint32 { + //return binary.LittleEndian.Uint32(b[i:]) + //return *(*uint32)(unsafe.Pointer(&b[i])) + return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Load64 will load from b at index i. +func Load64[I Indexer](b []byte, i I) uint64 { + //return binary.LittleEndian.Uint64(b[i:]) + //return *(*uint64)(unsafe.Pointer(&b[i])) + return *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Store16 will store v at b. +func Store16(b []byte, v uint16) { + //binary.LittleEndian.PutUint16(b, v) + *(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v +} + +// Store32 will store v at b. +func Store32(b []byte, v uint32) { + //binary.LittleEndian.PutUint32(b, v) + *(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v +} + +// Store64 will store v at b. +func Store64(b []byte, v uint64) { + //binary.LittleEndian.PutUint64(b, v) + *(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v +} diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/s2sx.mod b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/s2sx.mod index 5a4412f90..81bda5e29 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/s2sx.mod +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/s2sx.mod @@ -1,4 +1,3 @@ module github.com/klauspost/compress -go 1.19 - +go 1.22 diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/README.md b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/README.md index 92e2347bb..c11d7fa28 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/README.md +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/README.md @@ -6,7 +6,7 @@ A high performance compression algorithm is implemented. For now focused on spee This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. -This package is pure Go and without use of "unsafe". +This package is pure Go. Use `noasm` and `nounsafe` to disable relevant features. The `zstd` package is provided as open source software using a Go standard license. 
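The zstd README change above advertises the new `noasm`/`nounsafe` build tags (e.g. `go build -tags "noasm nounsafe"`) for disabling the assembly and unsafe paths. For orientation, below is a minimal buffer-to-buffer round trip through the vendored package, assuming the documented nil-writer/nil-reader pattern for `EncodeAll`/`DecodeAll`; it is an illustrative sketch only.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// nil writer/reader: buffer-only use via EncodeAll/DecodeAll.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	src := bytes.Repeat([]byte("linuxkit "), 1<<10)
	compressed := enc.EncodeAll(src, nil)
	out, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes, round-trip ok: %v\n", len(src), len(compressed), bytes.Equal(src, out))
}
```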
diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/bitreader.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/bitreader.go index 25ca98394..d41e3e170 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -5,11 +5,12 @@ package zstd import ( - "encoding/binary" "errors" "fmt" "io" "math/bits" + + "github.com/klauspost/compress/internal/le" ) // bitReader reads a bitstream in reverse. @@ -18,6 +19,7 @@ import ( type bitReader struct { in []byte value uint64 // Maybe use [16]byte, but shifting is awkward. + cursor int // offset where next read should end bitsRead uint8 } @@ -32,6 +34,7 @@ func (b *bitReader) init(in []byte) error { if v == 0 { return errors.New("corrupt stream, did not find end of stream") } + b.cursor = len(in) b.bitsRead = 64 b.value = 0 if len(in) >= 8 { @@ -67,18 +70,15 @@ func (b *bitReader) fillFast() { if b.bitsRead < 32 { return } - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) + b.cursor -= 4 + b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor)) b.bitsRead -= 32 } // fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. func (b *bitReader) fillFastStart() { - v := b.in[len(b.in)-8:] - b.in = b.in[:len(b.in)-8] - b.value = binary.LittleEndian.Uint64(v) + b.cursor -= 8 + b.value = le.Load64(b.in, b.cursor) b.bitsRead = 0 } @@ -87,25 +87,23 @@ func (b *bitReader) fill() { if b.bitsRead < 32 { return } - if len(b.in) >= 4 { - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) + if b.cursor >= 4 { + b.cursor -= 4 + b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor)) b.bitsRead -= 32 return } - b.bitsRead -= uint8(8 * len(b.in)) - for len(b.in) > 0 { - b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) - b.in = b.in[:len(b.in)-1] + b.bitsRead -= uint8(8 * b.cursor) + for b.cursor > 0 { + b.cursor -= 1 + b.value = (b.value << 8) | uint64(b.in[b.cursor]) } } // finished returns true if all bits have been read from the bit stream. func (b *bitReader) finished() bool { - return len(b.in) == 0 && b.bitsRead >= 64 + return b.cursor == 0 && b.bitsRead >= 64 } // overread returns true if more bits have been requested than is on the stream. @@ -115,13 +113,14 @@ func (b *bitReader) overread() bool { // remain returns the number of bits remaining. func (b *bitReader) remain() uint { - return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) + return 8*uint(b.cursor) + 64 - uint(b.bitsRead) } // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReader) close() error { // Release reference. 
b.in = nil + b.cursor = 0 if !b.finished() { return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) } diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/blockdec.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/blockdec.go index 9c28840c3..0dd742fd2 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -5,14 +5,10 @@ package zstd import ( - "bytes" - "encoding/binary" "errors" "fmt" "hash/crc32" "io" - "os" - "path/filepath" "sync" "github.com/klauspost/compress/huff0" @@ -648,21 +644,6 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { println("initializing sequences:", err) return err } - // Extract blocks... - if false && hist.dict == nil { - fatalErr := func(err error) { - if err != nil { - panic(err) - } - } - fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) - var buf bytes.Buffer - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) - buf.Write(in) - os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) - } return nil } diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/blockenc.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/blockenc.go index 32a7f401d..fd35ea148 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -9,6 +9,7 @@ import ( "fmt" "math" "math/bits" + "slices" "github.com/klauspost/compress/huff0" ) @@ -457,16 +458,7 @@ func fuzzFseEncoder(data []byte) int { // All 0 return 0 } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - cnt := maxCount(hist[:maxSym]) + cnt := int(slices.Max(hist[:maxSym])) if cnt == len(data) { // RLE return 0 @@ -884,15 +876,6 @@ func (b *blockEnc) genCodes() { } } } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } if debugAsserts && mlMax > maxMatchLengthSymbol { panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) } @@ -903,7 +886,7 @@ func (b *blockEnc) genCodes() { panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) } - b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) - b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) - b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) + b.coders.mlEnc.HistogramFinished(mlMax, int(slices.Max(mlH[:mlMax+1]))) + b.coders.ofEnc.HistogramFinished(ofMax, int(slices.Max(ofH[:ofMax+1]))) + b.coders.llEnc.HistogramFinished(llMax, int(slices.Max(llH[:llMax+1]))) } diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/decoder.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/decoder.go index bbca17234..ea2a19376 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -123,7 +123,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { } // Read bytes from 
the decompressed stream into p. -// Returns the number of bytes written and any error that occurred. +// Returns the number of bytes read and any error that occurred. // When the stream is done, io.EOF will be returned. func (d *Decoder) Read(p []byte) (int, error) { var n int @@ -323,6 +323,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { frame.bBuf = nil if frame.history.decoders.br != nil { frame.history.decoders.br.in = nil + frame.history.decoders.br.cursor = 0 } d.decoders <- block }() diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/enc_base.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/enc_base.go index 5ca46038a..7d250c67f 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -116,7 +116,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 { panic(err) } if t < 0 { - err := fmt.Sprintf("s (%d) < 0", s) + err := fmt.Sprintf("t (%d) < 0", t) panic(err) } if s-t > e.maxMatchOff { diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go index 57b9c31c0..bea1779e9 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -7,20 +7,25 @@ package zstd import ( - "encoding/binary" "math/bits" + + "github.com/klauspost/compress/internal/le" ) // matchLen returns the maximum common prefix length of a and b. // a must be the shortest of the two. func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + left := len(a) + for left >= 8 { + diff := le.Load64(a, n) ^ le.Load64(b, n) if diff != 0 { return n + bits.TrailingZeros64(diff)>>3 } n += 8 + left -= 8 } + a = a[n:] + b = b[n:] for i := range a { if a[i] != b[i] { diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/seqdec.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/seqdec.go index d7fe6d82d..9a7de82f9 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -245,7 +245,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { return io.ErrUnexpectedEOF } var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + if br.cursor > 4+((maxOffsetBits+16+16)>>3) { // inlined function: // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index f5591fa1e..a708ca6d3 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -7,9 +7,9 @@ TEXT ·sequenceDecs_decode_amd64(SB), $8-32 MOVQ br+8(FP), CX MOVQ 24(CX), DX - MOVBQZX 32(CX), BX + MOVBQZX 40(CX), BX MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ 32(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -299,8 +299,8 @@ sequenceDecs_decode_amd64_match_len_ofs_ok: MOVQ R13, 160(AX) MOVQ br+8(FP), AX MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -335,9 +335,9 @@ 
error_overread: TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 MOVQ br+8(FP), CX MOVQ 24(CX), DX - MOVBQZX 32(CX), BX + MOVBQZX 40(CX), BX MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ 32(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -598,8 +598,8 @@ sequenceDecs_decode_56_amd64_match_len_ofs_ok: MOVQ R13, 160(AX) MOVQ br+8(FP), AX MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -634,9 +634,9 @@ error_overread: TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 MOVQ br+8(FP), BX MOVQ 24(BX), AX - MOVBQZX 32(BX), DX + MOVBQZX 40(BX), DX MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ 32(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -884,8 +884,8 @@ sequenceDecs_decode_bmi2_match_len_ofs_ok: MOVQ R12, 160(CX) MOVQ br+8(FP), CX MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -920,9 +920,9 @@ error_overread: TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 MOVQ br+8(FP), BX MOVQ 24(BX), AX - MOVBQZX 32(BX), DX + MOVBQZX 40(BX), DX MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ 32(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -1141,8 +1141,8 @@ sequenceDecs_decode_56_bmi2_match_len_ofs_ok: MOVQ R12, 160(CX) MOVQ br+8(FP), CX MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -1787,9 +1787,9 @@ empty_seqs: TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 MOVQ br+8(FP), CX MOVQ 24(CX), DX - MOVBQZX 32(CX), BX + MOVBQZX 40(CX), BX MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ 32(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -2281,8 +2281,8 @@ handle_loop: loop_finished: MOVQ br+8(FP), AX MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) // Update the context MOVQ ctx+16(FP), AX @@ -2349,9 +2349,9 @@ error_not_enough_space: TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 MOVQ br+8(FP), BX MOVQ 24(BX), AX - MOVBQZX 32(BX), DX + MOVBQZX 40(BX), DX MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ 32(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -2801,8 +2801,8 @@ handle_loop: loop_finished: MOVQ br+8(FP), CX MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) // Update the context MOVQ ctx+16(FP), AX @@ -2869,9 +2869,9 @@ error_not_enough_space: TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 MOVQ br+8(FP), CX MOVQ 24(CX), DX - MOVBQZX 32(CX), BX + MOVBQZX 40(CX), BX MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ 32(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -3465,8 +3465,8 @@ handle_loop: loop_finished: MOVQ br+8(FP), AX MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) // Update the context MOVQ ctx+16(FP), AX @@ -3533,9 +3533,9 @@ error_not_enough_space: TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 MOVQ br+8(FP), BX MOVQ 24(BX), AX - MOVBQZX 32(BX), DX + MOVBQZX 40(BX), DX MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ 32(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -4087,8 +4087,8 @@ handle_loop: loop_finished: MOVQ br+8(FP), CX MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) // Update the context MOVQ ctx+16(FP), AX diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go index 2fb35b788..7cec2197c 100644 --- 
a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { } for i := range seqs { var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + if br.cursor > 4+((maxOffsetBits+16+16)>>3) { // inlined function: // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/seqenc.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/seqenc.go index 8014174a7..65045eabd 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/seqenc.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -69,7 +69,6 @@ var llBitsTable = [maxLLCode + 1]byte{ func llCode(litLength uint32) uint8 { const llDeltaCode = 19 if litLength <= 63 { - // Compiler insists on bounds check (Go 1.12) return llCodeTable[litLength&63] } return uint8(highBit(litLength)) + llDeltaCode @@ -102,7 +101,6 @@ var mlBitsTable = [maxMLCode + 1]byte{ func mlCode(mlBase uint32) uint8 { const mlDeltaCode = 36 if mlBase <= 127 { - // Compiler insists on bounds check (Go 1.12) return mlCodeTable[mlBase&127] } return uint8(highBit(mlBase)) + mlDeltaCode diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/snappy.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/snappy.go index ec13594e8..a17381b8f 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -197,7 +197,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { n, r.err = w.Write(r.block.output) if r.err != nil { - return written, err + return written, r.err } written += int64(n) continue @@ -239,7 +239,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { } n, r.err = w.Write(r.block.output) if r.err != nil { - return written, err + return written, r.err } written += int64(n) continue diff --git a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/zstd.go b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/zstd.go index 066bef2a4..6252b46ae 100644 --- a/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/src/cmd/linuxkit/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -5,10 +5,11 @@ package zstd import ( "bytes" - "encoding/binary" "errors" "log" "math" + + "github.com/klauspost/compress/internal/le" ) // enable debug printing @@ -110,11 +111,11 @@ func printf(format string, a ...interface{}) { } func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) + return le.Load32(b, i) } func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) + return le.Load64(b, i) } type byter interface { diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/api/services/control/control.pb.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/api/services/control/control.pb.go index a180b3ae5..8d6442afa 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/api/services/control/control.pb.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/api/services/control/control.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.11.4 // source: github.com/moby/buildkit/api/services/control/control.proto @@ -16,6 +16,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -75,16 +76,15 @@ func (BuildHistoryEventType) EnumDescriptor() ([]byte, []int) { } type PruneRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Filter []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"` + All bool `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` + KeepDuration int64 `protobuf:"varint,3,opt,name=keepDuration,proto3" json:"keepDuration,omitempty"` + ReservedSpace int64 `protobuf:"varint,4,opt,name=reservedSpace,proto3" json:"reservedSpace,omitempty"` + MaxUsedSpace int64 `protobuf:"varint,5,opt,name=maxUsedSpace,proto3" json:"maxUsedSpace,omitempty"` + MinFreeSpace int64 `protobuf:"varint,6,opt,name=minFreeSpace,proto3" json:"minFreeSpace,omitempty"` unknownFields protoimpl.UnknownFields - - Filter []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"` - All bool `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` - KeepDuration int64 `protobuf:"varint,3,opt,name=keepDuration,proto3" json:"keepDuration,omitempty"` - ReservedSpace int64 `protobuf:"varint,4,opt,name=reservedSpace,proto3" json:"reservedSpace,omitempty"` - MaxUsedSpace int64 `protobuf:"varint,5,opt,name=maxUsedSpace,proto3" json:"maxUsedSpace,omitempty"` - MinFreeSpace int64 `protobuf:"varint,6,opt,name=minFreeSpace,proto3" json:"minFreeSpace,omitempty"` + sizeCache protoimpl.SizeCache } func (x *PruneRequest) Reset() { @@ -160,11 +160,11 @@ func (x *PruneRequest) GetMinFreeSpace() int64 { } type DiskUsageRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Filter []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"` + AgeLimit int64 `protobuf:"varint,2,opt,name=ageLimit,proto3" json:"ageLimit,omitempty"` unknownFields protoimpl.UnknownFields - - Filter []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DiskUsageRequest) Reset() { @@ -204,12 +204,18 @@ func (x *DiskUsageRequest) GetFilter() []string { return nil } -type DiskUsageResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *DiskUsageRequest) GetAgeLimit() int64 { + if x != nil { + return x.AgeLimit + } + return 0 +} - Record []*UsageRecord `protobuf:"bytes,1,rep,name=record,proto3" json:"record,omitempty"` +type DiskUsageResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Record []*UsageRecord `protobuf:"bytes,1,rep,name=record,proto3" json:"record,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DiskUsageResponse) Reset() { @@ -250,23 +256,22 @@ func (x *DiskUsageResponse) GetRecord() []*UsageRecord { } type UsageRecord struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Mutable bool `protobuf:"varint,2,opt,name=Mutable,proto3" json:"Mutable,omitempty"` - InUse bool `protobuf:"varint,3,opt,name=InUse,proto3" json:"InUse,omitempty"` - Size int64 
`protobuf:"varint,4,opt,name=Size,proto3" json:"Size,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Mutable bool `protobuf:"varint,2,opt,name=Mutable,proto3" json:"Mutable,omitempty"` + InUse bool `protobuf:"varint,3,opt,name=InUse,proto3" json:"InUse,omitempty"` + Size int64 `protobuf:"varint,4,opt,name=Size,proto3" json:"Size,omitempty"` // Deprecated: Marked as deprecated in github.com/moby/buildkit/api/services/control/control.proto. - Parent string `protobuf:"bytes,5,opt,name=Parent,proto3" json:"Parent,omitempty"` - CreatedAt *timestamp.Timestamp `protobuf:"bytes,6,opt,name=CreatedAt,proto3" json:"CreatedAt,omitempty"` - LastUsedAt *timestamp.Timestamp `protobuf:"bytes,7,opt,name=LastUsedAt,proto3" json:"LastUsedAt,omitempty"` - UsageCount int64 `protobuf:"varint,8,opt,name=UsageCount,proto3" json:"UsageCount,omitempty"` - Description string `protobuf:"bytes,9,opt,name=Description,proto3" json:"Description,omitempty"` - RecordType string `protobuf:"bytes,10,opt,name=RecordType,proto3" json:"RecordType,omitempty"` - Shared bool `protobuf:"varint,11,opt,name=Shared,proto3" json:"Shared,omitempty"` - Parents []string `protobuf:"bytes,12,rep,name=Parents,proto3" json:"Parents,omitempty"` + Parent string `protobuf:"bytes,5,opt,name=Parent,proto3" json:"Parent,omitempty"` + CreatedAt *timestamp.Timestamp `protobuf:"bytes,6,opt,name=CreatedAt,proto3" json:"CreatedAt,omitempty"` + LastUsedAt *timestamp.Timestamp `protobuf:"bytes,7,opt,name=LastUsedAt,proto3" json:"LastUsedAt,omitempty"` + UsageCount int64 `protobuf:"varint,8,opt,name=UsageCount,proto3" json:"UsageCount,omitempty"` + Description string `protobuf:"bytes,9,opt,name=Description,proto3" json:"Description,omitempty"` + RecordType string `protobuf:"bytes,10,opt,name=RecordType,proto3" json:"RecordType,omitempty"` + Shared bool `protobuf:"varint,11,opt,name=Shared,proto3" json:"Shared,omitempty"` + Parents []string `protobuf:"bytes,12,rep,name=Parents,proto3" json:"Parents,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *UsageRecord) Reset() { @@ -385,26 +390,26 @@ func (x *UsageRecord) GetParents() []string { } type SolveRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` - Definition *pb.Definition `protobuf:"bytes,2,opt,name=Definition,proto3" json:"Definition,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + Definition *pb.Definition `protobuf:"bytes,2,opt,name=Definition,proto3" json:"Definition,omitempty"` // ExporterDeprecated and ExporterAttrsDeprecated are deprecated in favor // of the new Exporters. If these fields are set, then they will be // appended to the Exporters field if Exporters was not explicitly set. 
ExporterDeprecated string `protobuf:"bytes,3,opt,name=ExporterDeprecated,proto3" json:"ExporterDeprecated,omitempty"` - ExporterAttrsDeprecated map[string]string `protobuf:"bytes,4,rep,name=ExporterAttrsDeprecated,proto3" json:"ExporterAttrsDeprecated,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ExporterAttrsDeprecated map[string]string `protobuf:"bytes,4,rep,name=ExporterAttrsDeprecated,proto3" json:"ExporterAttrsDeprecated,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Session string `protobuf:"bytes,5,opt,name=Session,proto3" json:"Session,omitempty"` Frontend string `protobuf:"bytes,6,opt,name=Frontend,proto3" json:"Frontend,omitempty"` - FrontendAttrs map[string]string `protobuf:"bytes,7,rep,name=FrontendAttrs,proto3" json:"FrontendAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + FrontendAttrs map[string]string `protobuf:"bytes,7,rep,name=FrontendAttrs,proto3" json:"FrontendAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Cache *CacheOptions `protobuf:"bytes,8,opt,name=Cache,proto3" json:"Cache,omitempty"` Entitlements []string `protobuf:"bytes,9,rep,name=Entitlements,proto3" json:"Entitlements,omitempty"` - FrontendInputs map[string]*pb.Definition `protobuf:"bytes,10,rep,name=FrontendInputs,proto3" json:"FrontendInputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + FrontendInputs map[string]*pb.Definition `protobuf:"bytes,10,rep,name=FrontendInputs,proto3" json:"FrontendInputs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Internal bool `protobuf:"varint,11,opt,name=Internal,proto3" json:"Internal,omitempty"` // Internal builds are not recorded in build history SourcePolicy *pb1.Policy `protobuf:"bytes,12,opt,name=SourcePolicy,proto3" json:"SourcePolicy,omitempty"` Exporters []*Exporter `protobuf:"bytes,13,rep,name=Exporters,proto3" json:"Exporters,omitempty"` + EnableSessionExporter bool `protobuf:"varint,14,opt,name=EnableSessionExporter,proto3" json:"EnableSessionExporter,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SolveRequest) Reset() { @@ -528,11 +533,15 @@ func (x *SolveRequest) GetExporters() []*Exporter { return nil } -type CacheOptions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *SolveRequest) GetEnableSessionExporter() bool { + if x != nil { + return x.EnableSessionExporter + } + return false +} +type CacheOptions struct { + state protoimpl.MessageState `protogen:"open.v1"` // ExportRefDeprecated is deprecated in favor or the new Exports since BuildKit v0.4.0. // When ExportRefDeprecated is set, the solver appends // {.Type = "registry", .Attrs = ExportAttrs.add("ref", ExportRef)} @@ -545,11 +554,13 @@ type CacheOptions struct { ImportRefsDeprecated []string `protobuf:"bytes,2,rep,name=ImportRefsDeprecated,proto3" json:"ImportRefsDeprecated,omitempty"` // ExportAttrsDeprecated is deprecated since BuildKit v0.4.0. // See the description of ExportRefDeprecated. 
- ExportAttrsDeprecated map[string]string `protobuf:"bytes,3,rep,name=ExportAttrsDeprecated,proto3" json:"ExportAttrsDeprecated,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ExportAttrsDeprecated map[string]string `protobuf:"bytes,3,rep,name=ExportAttrsDeprecated,proto3" json:"ExportAttrsDeprecated,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Exports was introduced in BuildKit v0.4.0. Exports []*CacheOptionsEntry `protobuf:"bytes,4,rep,name=Exports,proto3" json:"Exports,omitempty"` // Imports was introduced in BuildKit v0.4.0. - Imports []*CacheOptionsEntry `protobuf:"bytes,5,rep,name=Imports,proto3" json:"Imports,omitempty"` + Imports []*CacheOptionsEntry `protobuf:"bytes,5,rep,name=Imports,proto3" json:"Imports,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CacheOptions) Reset() { @@ -618,15 +629,14 @@ func (x *CacheOptions) GetImports() []*CacheOptionsEntry { } type CacheOptionsEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Type is like "registry" or "local" Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` // Attrs are like mode=(min,max), ref=example.com:5000/foo/bar . // See cache importer/exporter implementations' documentation. - Attrs map[string]string `protobuf:"bytes,2,rep,name=Attrs,proto3" json:"Attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Attrs map[string]string `protobuf:"bytes,2,rep,name=Attrs,proto3" json:"Attrs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CacheOptionsEntry) Reset() { @@ -674,11 +684,10 @@ func (x *CacheOptionsEntry) GetAttrs() map[string]string { } type SolveResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ExporterResponse map[string]string `protobuf:"bytes,1,rep,name=ExporterResponse,proto3" json:"ExporterResponse,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + state protoimpl.MessageState `protogen:"open.v1"` + ExporterResponse map[string]string `protobuf:"bytes,1,rep,name=ExporterResponse,proto3" json:"ExporterResponse,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SolveResponse) Reset() { @@ -719,11 +728,10 @@ func (x *SolveResponse) GetExporterResponse() map[string]string { } type StatusRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` unknownFields protoimpl.UnknownFields - - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + sizeCache protoimpl.SizeCache } func (x *StatusRequest) Reset() { @@ -764,14 +772,13 @@ func (x *StatusRequest) GetRef() string { } type StatusResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Vertexes []*Vertex `protobuf:"bytes,1,rep,name=vertexes,proto3" json:"vertexes,omitempty"` + Statuses []*VertexStatus 
`protobuf:"bytes,2,rep,name=statuses,proto3" json:"statuses,omitempty"` + Logs []*VertexLog `protobuf:"bytes,3,rep,name=logs,proto3" json:"logs,omitempty"` + Warnings []*VertexWarning `protobuf:"bytes,4,rep,name=warnings,proto3" json:"warnings,omitempty"` unknownFields protoimpl.UnknownFields - - Vertexes []*Vertex `protobuf:"bytes,1,rep,name=vertexes,proto3" json:"vertexes,omitempty"` - Statuses []*VertexStatus `protobuf:"bytes,2,rep,name=statuses,proto3" json:"statuses,omitempty"` - Logs []*VertexLog `protobuf:"bytes,3,rep,name=logs,proto3" json:"logs,omitempty"` - Warnings []*VertexWarning `protobuf:"bytes,4,rep,name=warnings,proto3" json:"warnings,omitempty"` + sizeCache protoimpl.SizeCache } func (x *StatusResponse) Reset() { @@ -833,18 +840,17 @@ func (x *StatusResponse) GetWarnings() []*VertexWarning { } type Vertex struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` + Inputs []string `protobuf:"bytes,2,rep,name=inputs,proto3" json:"inputs,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Cached bool `protobuf:"varint,4,opt,name=cached,proto3" json:"cached,omitempty"` + Started *timestamp.Timestamp `protobuf:"bytes,5,opt,name=started,proto3" json:"started,omitempty"` + Completed *timestamp.Timestamp `protobuf:"bytes,6,opt,name=completed,proto3" json:"completed,omitempty"` + Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` // typed errors? + ProgressGroup *pb.ProgressGroup `protobuf:"bytes,8,opt,name=progressGroup,proto3" json:"progressGroup,omitempty"` unknownFields protoimpl.UnknownFields - - Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` - Inputs []string `protobuf:"bytes,2,rep,name=inputs,proto3" json:"inputs,omitempty"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - Cached bool `protobuf:"varint,4,opt,name=cached,proto3" json:"cached,omitempty"` - Started *timestamp.Timestamp `protobuf:"bytes,5,opt,name=started,proto3" json:"started,omitempty"` - Completed *timestamp.Timestamp `protobuf:"bytes,6,opt,name=completed,proto3" json:"completed,omitempty"` - Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` // typed errors? 
- ProgressGroup *pb.ProgressGroup `protobuf:"bytes,8,opt,name=progressGroup,proto3" json:"progressGroup,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Vertex) Reset() { @@ -934,18 +940,17 @@ func (x *Vertex) GetProgressGroup() *pb.ProgressGroup { } type VertexStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Vertex string `protobuf:"bytes,2,opt,name=vertex,proto3" json:"vertex,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Current int64 `protobuf:"varint,4,opt,name=current,proto3" json:"current,omitempty"` + Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` + Timestamp *timestamp.Timestamp `protobuf:"bytes,6,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Started *timestamp.Timestamp `protobuf:"bytes,7,opt,name=started,proto3" json:"started,omitempty"` + Completed *timestamp.Timestamp `protobuf:"bytes,8,opt,name=completed,proto3" json:"completed,omitempty"` unknownFields protoimpl.UnknownFields - - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Vertex string `protobuf:"bytes,2,opt,name=vertex,proto3" json:"vertex,omitempty"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - Current int64 `protobuf:"varint,4,opt,name=current,proto3" json:"current,omitempty"` - Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` - Timestamp *timestamp.Timestamp `protobuf:"bytes,6,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - Started *timestamp.Timestamp `protobuf:"bytes,7,opt,name=started,proto3" json:"started,omitempty"` - Completed *timestamp.Timestamp `protobuf:"bytes,8,opt,name=completed,proto3" json:"completed,omitempty"` + sizeCache protoimpl.SizeCache } func (x *VertexStatus) Reset() { @@ -1035,14 +1040,13 @@ func (x *VertexStatus) GetCompleted() *timestamp.Timestamp { } type VertexLog struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Vertex string `protobuf:"bytes,1,opt,name=vertex,proto3" json:"vertex,omitempty"` + Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Stream int64 `protobuf:"varint,3,opt,name=stream,proto3" json:"stream,omitempty"` + Msg []byte `protobuf:"bytes,4,opt,name=msg,proto3" json:"msg,omitempty"` unknownFields protoimpl.UnknownFields - - Vertex string `protobuf:"bytes,1,opt,name=vertex,proto3" json:"vertex,omitempty"` - Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - Stream int64 `protobuf:"varint,3,opt,name=stream,proto3" json:"stream,omitempty"` - Msg []byte `protobuf:"bytes,4,opt,name=msg,proto3" json:"msg,omitempty"` + sizeCache protoimpl.SizeCache } func (x *VertexLog) Reset() { @@ -1104,17 +1108,16 @@ func (x *VertexLog) GetMsg() []byte { } type VertexWarning struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Vertex string `protobuf:"bytes,1,opt,name=vertex,proto3" json:"vertex,omitempty"` + Level int64 `protobuf:"varint,2,opt,name=level,proto3" json:"level,omitempty"` + Short []byte `protobuf:"bytes,3,opt,name=short,proto3" json:"short,omitempty"` + Detail [][]byte `protobuf:"bytes,4,rep,name=detail,proto3" json:"detail,omitempty"` + Url string 
`protobuf:"bytes,5,opt,name=url,proto3" json:"url,omitempty"` + Info *pb.SourceInfo `protobuf:"bytes,6,opt,name=info,proto3" json:"info,omitempty"` + Ranges []*pb.Range `protobuf:"bytes,7,rep,name=ranges,proto3" json:"ranges,omitempty"` unknownFields protoimpl.UnknownFields - - Vertex string `protobuf:"bytes,1,opt,name=vertex,proto3" json:"vertex,omitempty"` - Level int64 `protobuf:"varint,2,opt,name=level,proto3" json:"level,omitempty"` - Short []byte `protobuf:"bytes,3,opt,name=short,proto3" json:"short,omitempty"` - Detail [][]byte `protobuf:"bytes,4,rep,name=detail,proto3" json:"detail,omitempty"` - Url string `protobuf:"bytes,5,opt,name=url,proto3" json:"url,omitempty"` - Info *pb.SourceInfo `protobuf:"bytes,6,opt,name=info,proto3" json:"info,omitempty"` - Ranges []*pb.Range `protobuf:"bytes,7,rep,name=ranges,proto3" json:"ranges,omitempty"` + sizeCache protoimpl.SizeCache } func (x *VertexWarning) Reset() { @@ -1197,11 +1200,10 @@ func (x *VertexWarning) GetRanges() []*pb.Range { } type BytesMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` unknownFields protoimpl.UnknownFields - - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + sizeCache protoimpl.SizeCache } func (x *BytesMessage) Reset() { @@ -1242,11 +1244,10 @@ func (x *BytesMessage) GetData() []byte { } type ListWorkersRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Filter []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"` // containerd style unknownFields protoimpl.UnknownFields - - Filter []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"` // containerd style + sizeCache protoimpl.SizeCache } func (x *ListWorkersRequest) Reset() { @@ -1287,11 +1288,10 @@ func (x *ListWorkersRequest) GetFilter() []string { } type ListWorkersResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Record []*types.WorkerRecord `protobuf:"bytes,1,rep,name=record,proto3" json:"record,omitempty"` unknownFields protoimpl.UnknownFields - - Record []*types.WorkerRecord `protobuf:"bytes,1,rep,name=record,proto3" json:"record,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ListWorkersResponse) Reset() { @@ -1332,9 +1332,9 @@ func (x *ListWorkersResponse) GetRecord() []*types.WorkerRecord { } type InfoRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *InfoRequest) Reset() { @@ -1368,11 +1368,10 @@ func (*InfoRequest) Descriptor() ([]byte, []int) { } type InfoResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` BuildkitVersion *types.BuildkitVersion `protobuf:"bytes,1,opt,name=buildkitVersion,proto3" json:"buildkitVersion,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *InfoResponse) Reset() { @@ -1413,15 +1412,14 @@ func (x *InfoResponse) GetBuildkitVersion() *types.BuildkitVersion { } type BuildHistoryRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState 
`protogen:"open.v1"` + ActiveOnly bool `protobuf:"varint,1,opt,name=ActiveOnly,proto3" json:"ActiveOnly,omitempty"` + Ref string `protobuf:"bytes,2,opt,name=Ref,proto3" json:"Ref,omitempty"` + EarlyExit bool `protobuf:"varint,3,opt,name=EarlyExit,proto3" json:"EarlyExit,omitempty"` + Filter []string `protobuf:"bytes,4,rep,name=Filter,proto3" json:"Filter,omitempty"` + Limit int32 `protobuf:"varint,5,opt,name=Limit,proto3" json:"Limit,omitempty"` unknownFields protoimpl.UnknownFields - - ActiveOnly bool `protobuf:"varint,1,opt,name=ActiveOnly,proto3" json:"ActiveOnly,omitempty"` - Ref string `protobuf:"bytes,2,opt,name=Ref,proto3" json:"Ref,omitempty"` - EarlyExit bool `protobuf:"varint,3,opt,name=EarlyExit,proto3" json:"EarlyExit,omitempty"` - Filter []string `protobuf:"bytes,4,rep,name=Filter,proto3" json:"Filter,omitempty"` - Limit int32 `protobuf:"varint,5,opt,name=Limit,proto3" json:"Limit,omitempty"` + sizeCache protoimpl.SizeCache } func (x *BuildHistoryRequest) Reset() { @@ -1490,12 +1488,11 @@ func (x *BuildHistoryRequest) GetLimit() int32 { } type BuildHistoryEvent struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Type BuildHistoryEventType `protobuf:"varint,1,opt,name=type,proto3,enum=moby.buildkit.v1.BuildHistoryEventType" json:"type,omitempty"` + Record *BuildHistoryRecord `protobuf:"bytes,2,opt,name=record,proto3" json:"record,omitempty"` unknownFields protoimpl.UnknownFields - - Type BuildHistoryEventType `protobuf:"varint,1,opt,name=type,proto3,enum=moby.buildkit.v1.BuildHistoryEventType" json:"type,omitempty"` - Record *BuildHistoryRecord `protobuf:"bytes,2,opt,name=record,proto3" json:"record,omitempty"` + sizeCache protoimpl.SizeCache } func (x *BuildHistoryEvent) Reset() { @@ -1543,21 +1540,18 @@ func (x *BuildHistoryEvent) GetRecord() *BuildHistoryRecord { } type BuildHistoryRecord struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` Frontend string `protobuf:"bytes,2,opt,name=Frontend,proto3" json:"Frontend,omitempty"` - FrontendAttrs map[string]string `protobuf:"bytes,3,rep,name=FrontendAttrs,proto3" json:"FrontendAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + FrontendAttrs map[string]string `protobuf:"bytes,3,rep,name=FrontendAttrs,proto3" json:"FrontendAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Exporters []*Exporter `protobuf:"bytes,4,rep,name=Exporters,proto3" json:"Exporters,omitempty"` Error *status.Status `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` CreatedAt *timestamp.Timestamp `protobuf:"bytes,6,opt,name=CreatedAt,proto3" json:"CreatedAt,omitempty"` CompletedAt *timestamp.Timestamp `protobuf:"bytes,7,opt,name=CompletedAt,proto3" json:"CompletedAt,omitempty"` Logs *Descriptor `protobuf:"bytes,8,opt,name=logs,proto3" json:"logs,omitempty"` - ExporterResponse map[string]string `protobuf:"bytes,9,rep,name=ExporterResponse,proto3" json:"ExporterResponse,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ExporterResponse map[string]string `protobuf:"bytes,9,rep,name=ExporterResponse,proto3" json:"ExporterResponse,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Result *BuildResultInfo 
`protobuf:"bytes,10,opt,name=Result,proto3" json:"Result,omitempty"` - Results map[string]*BuildResultInfo `protobuf:"bytes,11,rep,name=Results,proto3" json:"Results,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Results map[string]*BuildResultInfo `protobuf:"bytes,11,rep,name=Results,proto3" json:"Results,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Generation int32 `protobuf:"varint,12,opt,name=Generation,proto3" json:"Generation,omitempty"` Trace *Descriptor `protobuf:"bytes,13,opt,name=trace,proto3" json:"trace,omitempty"` Pinned bool `protobuf:"varint,14,opt,name=pinned,proto3" json:"pinned,omitempty"` @@ -1566,6 +1560,8 @@ type BuildHistoryRecord struct { NumCompletedSteps int32 `protobuf:"varint,17,opt,name=numCompletedSteps,proto3" json:"numCompletedSteps,omitempty"` ExternalError *Descriptor `protobuf:"bytes,18,opt,name=externalError,proto3" json:"externalError,omitempty"` NumWarnings int32 `protobuf:"varint,19,opt,name=numWarnings,proto3" json:"numWarnings,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *BuildHistoryRecord) Reset() { @@ -1732,14 +1728,13 @@ func (x *BuildHistoryRecord) GetNumWarnings() int32 { } type UpdateBuildHistoryRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + Pinned bool `protobuf:"varint,2,opt,name=Pinned,proto3" json:"Pinned,omitempty"` + Delete bool `protobuf:"varint,3,opt,name=Delete,proto3" json:"Delete,omitempty"` + Finalize bool `protobuf:"varint,4,opt,name=Finalize,proto3" json:"Finalize,omitempty"` unknownFields protoimpl.UnknownFields - - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` - Pinned bool `protobuf:"varint,2,opt,name=Pinned,proto3" json:"Pinned,omitempty"` - Delete bool `protobuf:"varint,3,opt,name=Delete,proto3" json:"Delete,omitempty"` - Finalize bool `protobuf:"varint,4,opt,name=Finalize,proto3" json:"Finalize,omitempty"` + sizeCache protoimpl.SizeCache } func (x *UpdateBuildHistoryRequest) Reset() { @@ -1801,9 +1796,9 @@ func (x *UpdateBuildHistoryRequest) GetFinalize() bool { } type UpdateBuildHistoryResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *UpdateBuildHistoryResponse) Reset() { @@ -1837,14 +1832,13 @@ func (*UpdateBuildHistoryResponse) Descriptor() ([]byte, []int) { } type Descriptor struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` + Digest string `protobuf:"bytes,2,opt,name=digest,proto3" json:"digest,omitempty"` + Size int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields - - MediaType string `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"` - Digest string `protobuf:"bytes,2,opt,name=digest,proto3" json:"digest,omitempty"` - Size int64 `protobuf:"varint,3,opt,name=size,proto3" 
json:"size,omitempty"` - Annotations map[string]string `protobuf:"bytes,5,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + sizeCache protoimpl.SizeCache } func (x *Descriptor) Reset() { @@ -1906,13 +1900,12 @@ func (x *Descriptor) GetAnnotations() map[string]string { } type BuildResultInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ResultDeprecated *Descriptor `protobuf:"bytes,1,opt,name=ResultDeprecated,proto3" json:"ResultDeprecated,omitempty"` - Attestations []*Descriptor `protobuf:"bytes,2,rep,name=Attestations,proto3" json:"Attestations,omitempty"` - Results map[int64]*Descriptor `protobuf:"bytes,3,rep,name=Results,proto3" json:"Results,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + state protoimpl.MessageState `protogen:"open.v1"` + ResultDeprecated *Descriptor `protobuf:"bytes,1,opt,name=ResultDeprecated,proto3" json:"ResultDeprecated,omitempty"` + Attestations []*Descriptor `protobuf:"bytes,2,rep,name=Attestations,proto3" json:"Attestations,omitempty"` + Results map[int64]*Descriptor `protobuf:"bytes,3,rep,name=Results,proto3" json:"Results,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *BuildResultInfo) Reset() { @@ -1968,14 +1961,13 @@ func (x *BuildResultInfo) GetResults() map[int64]*Descriptor { // Exporter describes the output exporter type Exporter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Type identifies the exporter Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` // Attrs specifies exporter configuration - Attrs map[string]string `protobuf:"bytes,2,rep,name=Attrs,proto3" json:"Attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Attrs map[string]string `protobuf:"bytes,2,rep,name=Attrs,proto3" json:"Attrs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Exporter) Reset() { @@ -2024,490 +2016,232 @@ func (x *Exporter) GetAttrs() map[string]string { var File_github_com_moby_buildkit_api_services_control_control_proto protoreflect.FileDescriptor -var file_github_com_moby_buildkit_api_services_control_control_proto_rawDesc = []byte{ - 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, - 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x6d, - 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x1a, - 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, - 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, - 0x79, 
0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x6f, 0x6c, 0x76, 0x65, - 0x72, 0x2f, 0x70, 0x62, 0x2f, 0x6f, 0x70, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x35, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, 0x2f, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2f, 0x70, 0x62, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, - 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xca, 0x01, 0x0a, 0x0c, 0x50, 0x72, 0x75, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, 0x6c, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12, 0x22, 0x0a, 0x0c, 0x6b, 0x65, - 0x65, 0x70, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x0c, 0x6b, 0x65, 0x65, 0x70, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x24, - 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x53, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x55, 0x73, 0x65, 0x64, 0x53, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x55, - 0x73, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6d, 0x69, 0x6e, 0x46, - 0x72, 0x65, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, - 0x6d, 0x69, 0x6e, 0x46, 0x72, 0x65, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x22, 0x2a, 0x0a, 0x10, - 0x44, 0x69, 0x73, 0x6b, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x4a, 0x0a, 0x11, 0x44, 0x69, 0x73, 0x6b, - 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x35, 0x0a, - 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, - 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, - 0x2e, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x06, 0x72, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x22, 0x87, 0x03, 0x0a, 0x0b, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, - 0x63, 0x6f, 0x72, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x4d, 0x75, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x49, 0x6e, 0x55, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x49, - 0x6e, 0x55, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1a, 0x0a, 0x06, 0x50, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x18, 0x05, 0x20, 
0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x50, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, - 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x09, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3a, - 0x0a, 0x0a, 0x4c, 0x61, 0x73, 0x74, 0x55, 0x73, 0x65, 0x64, 0x41, 0x74, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, - 0x4c, 0x61, 0x73, 0x74, 0x55, 0x73, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x55, 0x73, - 0x61, 0x67, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, - 0x55, 0x73, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, - 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x54, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0a, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x53, 0x68, - 0x61, 0x72, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xbe, - 0x07, 0x0a, 0x0c, 0x53, 0x6f, 0x6c, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x10, 0x0a, 0x03, 0x52, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, - 0x66, 0x12, 0x2e, 0x0a, 0x0a, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x2e, 0x0a, 0x12, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x44, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x45, - 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, - 0x64, 0x12, 0x75, 0x0a, 0x17, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x41, 0x74, 0x74, - 0x72, 0x73, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, - 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6f, 0x6c, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x73, - 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x17, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x73, 0x44, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x46, 0x72, 0x6f, 
0x6e, 0x74, 0x65, 0x6e, 0x64, 0x12, 0x57, - 0x0a, 0x0d, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x73, 0x18, - 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6f, 0x6c, 0x76, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, - 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x73, 0x12, 0x34, 0x0a, 0x05, 0x43, 0x61, 0x63, 0x68, 0x65, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x05, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x22, 0x0a, - 0x0c, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x09, 0x20, - 0x03, 0x28, 0x09, 0x52, 0x0c, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x5a, 0x0a, 0x0e, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x49, 0x6e, 0x70, - 0x75, 0x74, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x6d, 0x6f, 0x62, 0x79, - 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6f, 0x6c, - 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x46, - 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x1a, 0x0a, - 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x49, 0x0a, 0x0c, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x25, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, - 0x76, 0x31, 0x2e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0c, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x38, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, - 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, - 0x74, 0x65, 0x72, 0x52, 0x09, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x4a, - 0x0a, 0x1c, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x73, 0x44, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x40, 0x0a, 0x12, 0x46, 0x72, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 
0x51, 0x0a, 0x13, - 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0xad, 0x03, 0x0a, 0x0c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x30, 0x0a, 0x13, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x66, 0x44, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x45, - 0x78, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x66, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x12, 0x32, 0x0a, 0x14, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x66, 0x73, - 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x14, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x66, 0x73, 0x44, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x6f, 0x0a, 0x15, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, - 0x41, 0x74, 0x74, 0x72, 0x73, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x74, 0x74, 0x72, - 0x73, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x15, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x74, 0x74, 0x72, 0x73, 0x44, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x07, 0x45, 0x78, 0x70, 0x6f, 0x72, - 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x63, 0x68, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x45, - 0x78, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x3d, 0x0a, 0x07, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, - 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x49, 0x6d, - 0x70, 0x6f, 0x72, 0x74, 0x73, 0x1a, 0x48, 0x0a, 0x1a, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x41, - 0x74, 0x74, 0x72, 0x73, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0xa7, 0x01, 0x0a, 0x11, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x44, 0x0a, 0x05, 0x41, 0x74, 0x74, - 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, - 0x62, 
0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x61, 0x63, 0x68, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x2e, 0x41, 0x74, - 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x41, 0x74, 0x74, 0x72, 0x73, 0x1a, - 0x38, 0x0a, 0x0a, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb7, 0x01, 0x0a, 0x0d, 0x53, 0x6f, - 0x6c, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x61, 0x0a, 0x10, 0x45, - 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6f, 0x6c, 0x76, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x45, 0x78, - 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x1a, 0x43, - 0x0a, 0x15, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x21, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x52, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x22, 0xf0, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x08, 0x76, 0x65, 0x72, - 0x74, 0x65, 0x78, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x6f, - 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x56, - 0x65, 0x72, 0x74, 0x65, 0x78, 0x52, 0x08, 0x76, 0x65, 0x72, 0x74, 0x65, 0x78, 0x65, 0x73, 0x12, - 0x3a, 0x0a, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, - 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x12, 0x2f, 0x0a, 0x04, 0x6c, - 0x6f, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x6f, 0x62, 0x79, - 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, - 0x74, 0x65, 0x78, 0x4c, 0x6f, 0x67, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x3b, 0x0a, 0x08, - 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, - 0x31, 0x2e, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x52, - 0x08, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xa3, 0x02, 0x0a, 0x06, 0x56, 0x65, - 0x72, 0x74, 0x65, 0x78, 0x12, 
0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x69, 0x6e, - 0x70, 0x75, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x61, 0x63, 0x68, - 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, - 0x12, 0x34, 0x0a, 0x07, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x38, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, - 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, - 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x37, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, - 0x73, 0x73, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, - 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x72, 0x6f, 0x75, 0x70, - 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x22, - 0xa4, 0x02, 0x0a, 0x0c, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, - 0x12, 0x16, 0x0a, 0x06, 0x76, 0x65, 0x72, 0x74, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x76, 0x65, 0x72, 0x74, 0x65, 0x78, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x38, 0x0a, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x34, 0x0a, 0x07, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, - 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x07, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x12, 0x38, 0x0a, 0x09, - 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x6f, 0x6d, - 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x87, 0x01, 
0x0a, 0x09, 0x56, 0x65, 0x72, 0x74, 0x65, - 0x78, 0x4c, 0x6f, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x65, 0x72, 0x74, 0x65, 0x78, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x76, 0x65, 0x72, 0x74, 0x65, 0x78, 0x12, 0x38, 0x0a, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x10, - 0x0a, 0x03, 0x6d, 0x73, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x03, 0x6d, 0x73, 0x67, - 0x22, 0xc4, 0x01, 0x0a, 0x0d, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x57, 0x61, 0x72, 0x6e, 0x69, - 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x76, 0x65, 0x72, 0x74, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x76, 0x65, 0x72, 0x74, 0x65, 0x78, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x65, - 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, - 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x05, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x10, - 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, - 0x12, 0x22, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, - 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, - 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, - 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x22, 0x22, 0x0a, 0x0c, 0x42, 0x79, 0x74, 0x65, 0x73, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x2c, 0x0a, 0x12, 0x4c, - 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x53, 0x0a, 0x13, 0x4c, 0x69, 0x73, - 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x3c, 0x0a, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, - 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x22, 0x0d, - 0x0a, 0x0b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x61, 0x0a, - 0x0c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, - 0x0f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 
0x65, 0x73, 0x2e, - 0x42, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x0f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x22, 0x93, 0x01, 0x0a, 0x13, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x41, 0x63, 0x74, 0x69, - 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x41, 0x63, - 0x74, 0x69, 0x76, 0x65, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x52, 0x65, 0x66, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x1c, 0x0a, 0x09, 0x45, 0x61, - 0x72, 0x6c, 0x79, 0x45, 0x78, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x45, - 0x61, 0x72, 0x6c, 0x79, 0x45, 0x78, 0x69, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, - 0x12, 0x14, 0x0a, 0x05, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x05, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x8e, 0x01, 0x0a, 0x11, 0x42, 0x75, 0x69, 0x6c, 0x64, - 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3b, 0x0a, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x6d, 0x6f, 0x62, - 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, - 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x3c, 0x0a, 0x06, 0x72, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6d, 0x6f, 0x62, 0x79, - 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, - 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x52, - 0x06, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x22, 0xd3, 0x09, 0x0a, 0x12, 0x42, 0x75, 0x69, 0x6c, - 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x10, - 0x0a, 0x03, 0x52, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, - 0x12, 0x1a, 0x0a, 0x08, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x12, 0x5d, 0x0a, 0x0d, - 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x73, 0x18, 0x03, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x46, 0x72, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x45, - 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, - 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x52, 0x09, 0x45, 0x78, 0x70, 0x6f, - 0x72, 0x74, 0x65, 0x72, 0x73, 0x12, 0x28, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, - 0x63, 
0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, - 0x38, 0x0a, 0x09, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3c, 0x0a, 0x0b, 0x43, 0x6f, 0x6d, - 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x43, 0x6f, 0x6d, 0x70, - 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x30, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x66, 0x0a, 0x10, 0x45, 0x78, 0x70, - 0x6f, 0x72, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x09, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, - 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x10, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x39, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, - 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x4b, 0x0a, 0x07, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x31, 0x2e, - 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, - 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x63, - 0x6f, 0x72, 0x64, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x07, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x47, - 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x05, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x05, 0x74, 0x72, 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x70, 0x69, 0x6e, 0x6e, 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, - 0x69, 0x6e, 0x6e, 0x65, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x6e, 0x75, 0x6d, 0x43, 0x61, 0x63, 0x68, - 0x65, 0x64, 0x53, 0x74, 0x65, 0x70, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x6e, - 0x75, 0x6d, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x53, 0x74, 0x65, 0x70, 0x73, 0x12, 0x24, 0x0a, - 0x0d, 0x6e, 0x75, 0x6d, 0x54, 
0x6f, 0x74, 0x61, 0x6c, 0x53, 0x74, 0x65, 0x70, 0x73, 0x18, 0x10, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x6e, 0x75, 0x6d, 0x54, 0x6f, 0x74, 0x61, 0x6c, 0x53, 0x74, - 0x65, 0x70, 0x73, 0x12, 0x2c, 0x0a, 0x11, 0x6e, 0x75, 0x6d, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, - 0x74, 0x65, 0x64, 0x53, 0x74, 0x65, 0x70, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, - 0x6e, 0x75, 0x6d, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x53, 0x74, 0x65, 0x70, - 0x73, 0x12, 0x42, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x18, 0x12, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, - 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x6e, 0x75, 0x6d, 0x57, 0x61, 0x72, 0x6e, - 0x69, 0x6e, 0x67, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x57, - 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x1a, 0x40, 0x0a, 0x12, 0x46, 0x72, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x64, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x45, 0x78, 0x70, - 0x6f, 0x72, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5d, - 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, - 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, - 0x12, 0x37, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x21, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, - 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x49, 0x6e, - 0x66, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x79, 0x0a, - 0x19, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, - 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x52, 0x65, - 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x16, 0x0a, 0x06, - 0x50, 0x69, 0x6e, 0x6e, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x50, 0x69, - 0x6e, 0x6e, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x1a, 0x0a, 0x08, - 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, - 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x22, 0x1c, 0x0a, 0x1a, 0x55, 0x70, 0x64, 0x61, - 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xe8, 0x01, 0x0a, 0x0a, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 
0x6d, 0x65, 0x64, 0x69, 0x61, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x64, 0x69, 0x61, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, - 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, - 0x12, 0x4f, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0xc1, 0x02, 0x0a, 0x0f, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x48, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x44, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, - 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x10, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x40, 0x0a, 0x0c, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x52, 0x0c, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x48, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, - 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x52, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x07, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a, 0x58, 0x0a, 0x0c, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x32, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6d, - 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x95, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, - 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x41, 0x74, 0x74, 
0x72, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, - 0x72, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x41, 0x74, - 0x74, 0x72, 0x73, 0x1a, 0x38, 0x0a, 0x0a, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x2a, 0x3f, 0x0a, - 0x15, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x10, - 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, 0x32, 0x89, - 0x06, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x54, 0x0a, 0x09, 0x44, 0x69, - 0x73, 0x6b, 0x55, 0x73, 0x61, 0x67, 0x65, 0x12, 0x22, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x69, 0x73, 0x6b, 0x55, - 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x6f, - 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x44, - 0x69, 0x73, 0x6b, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x48, 0x0a, 0x05, 0x50, 0x72, 0x75, 0x6e, 0x65, 0x12, 0x1e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, - 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x75, - 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x6f, 0x62, 0x79, - 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x73, 0x61, - 0x67, 0x65, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x30, 0x01, 0x12, 0x48, 0x0a, 0x05, 0x53, 0x6f, - 0x6c, 0x76, 0x65, 0x12, 0x1e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6f, 0x6c, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6f, 0x6c, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1f, - 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, - 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x20, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, - 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x30, 0x01, 0x12, 0x4d, 0x0a, 0x07, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, - 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, - 0x31, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1e, - 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, - 0x31, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x28, 0x01, - 0x30, 
0x01, 0x12, 0x5a, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x73, 0x12, 0x24, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, - 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x57, - 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, - 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x42, - 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x25, 0x2e, 0x6d, 0x6f, - 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, - 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, - 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x30, 0x01, 0x12, 0x6f, 0x0a, 0x12, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x72, 0x79, 0x12, - 0x2b, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, - 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, - 0x73, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x6d, - 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x48, 0x69, 0x73, 0x74, 0x6f, - 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, 0x2f, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x3b, 0x6d, 0x6f, 0x62, 0x79, - 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} +const file_github_com_moby_buildkit_api_services_control_control_proto_rawDesc = "" + + "\n" + + ";github.com/moby/buildkit/api/services/control/control.proto\x12\x10moby.buildkit.v1\x1a/github.com/moby/buildkit/api/types/worker.proto\x1a,github.com/moby/buildkit/solver/pb/ops.proto\x1a5github.com/moby/buildkit/sourcepolicy/pb/policy.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"\xca\x01\n" + + "\fPruneRequest\x12\x16\n" + + "\x06filter\x18\x01 \x03(\tR\x06filter\x12\x10\n" + + "\x03all\x18\x02 \x01(\bR\x03all\x12\"\n" + + "\fkeepDuration\x18\x03 \x01(\x03R\fkeepDuration\x12$\n" + + "\rreservedSpace\x18\x04 \x01(\x03R\rreservedSpace\x12\"\n" + + "\fmaxUsedSpace\x18\x05 \x01(\x03R\fmaxUsedSpace\x12\"\n" + + "\fminFreeSpace\x18\x06 
\x01(\x03R\fminFreeSpace\"F\n" + + "\x10DiskUsageRequest\x12\x16\n" + + "\x06filter\x18\x01 \x03(\tR\x06filter\x12\x1a\n" + + "\bageLimit\x18\x02 \x01(\x03R\bageLimit\"J\n" + + "\x11DiskUsageResponse\x125\n" + + "\x06record\x18\x01 \x03(\v2\x1d.moby.buildkit.v1.UsageRecordR\x06record\"\x87\x03\n" + + "\vUsageRecord\x12\x0e\n" + + "\x02ID\x18\x01 \x01(\tR\x02ID\x12\x18\n" + + "\aMutable\x18\x02 \x01(\bR\aMutable\x12\x14\n" + + "\x05InUse\x18\x03 \x01(\bR\x05InUse\x12\x12\n" + + "\x04Size\x18\x04 \x01(\x03R\x04Size\x12\x1a\n" + + "\x06Parent\x18\x05 \x01(\tB\x02\x18\x01R\x06Parent\x128\n" + + "\tCreatedAt\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\tCreatedAt\x12:\n" + + "\n" + + "LastUsedAt\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\n" + + "LastUsedAt\x12\x1e\n" + + "\n" + + "UsageCount\x18\b \x01(\x03R\n" + + "UsageCount\x12 \n" + + "\vDescription\x18\t \x01(\tR\vDescription\x12\x1e\n" + + "\n" + + "RecordType\x18\n" + + " \x01(\tR\n" + + "RecordType\x12\x16\n" + + "\x06Shared\x18\v \x01(\bR\x06Shared\x12\x18\n" + + "\aParents\x18\f \x03(\tR\aParents\"\xf4\a\n" + + "\fSolveRequest\x12\x10\n" + + "\x03Ref\x18\x01 \x01(\tR\x03Ref\x12.\n" + + "\n" + + "Definition\x18\x02 \x01(\v2\x0e.pb.DefinitionR\n" + + "Definition\x12.\n" + + "\x12ExporterDeprecated\x18\x03 \x01(\tR\x12ExporterDeprecated\x12u\n" + + "\x17ExporterAttrsDeprecated\x18\x04 \x03(\v2;.moby.buildkit.v1.SolveRequest.ExporterAttrsDeprecatedEntryR\x17ExporterAttrsDeprecated\x12\x18\n" + + "\aSession\x18\x05 \x01(\tR\aSession\x12\x1a\n" + + "\bFrontend\x18\x06 \x01(\tR\bFrontend\x12W\n" + + "\rFrontendAttrs\x18\a \x03(\v21.moby.buildkit.v1.SolveRequest.FrontendAttrsEntryR\rFrontendAttrs\x124\n" + + "\x05Cache\x18\b \x01(\v2\x1e.moby.buildkit.v1.CacheOptionsR\x05Cache\x12\"\n" + + "\fEntitlements\x18\t \x03(\tR\fEntitlements\x12Z\n" + + "\x0eFrontendInputs\x18\n" + + " \x03(\v22.moby.buildkit.v1.SolveRequest.FrontendInputsEntryR\x0eFrontendInputs\x12\x1a\n" + + "\bInternal\x18\v \x01(\bR\bInternal\x12I\n" + + "\fSourcePolicy\x18\f \x01(\v2%.moby.buildkit.v1.sourcepolicy.PolicyR\fSourcePolicy\x128\n" + + "\tExporters\x18\r \x03(\v2\x1a.moby.buildkit.v1.ExporterR\tExporters\x124\n" + + "\x15EnableSessionExporter\x18\x0e \x01(\bR\x15EnableSessionExporter\x1aJ\n" + + "\x1cExporterAttrsDeprecatedEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\x1a@\n" + + "\x12FrontendAttrsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\x1aQ\n" + + "\x13FrontendInputsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12$\n" + + "\x05value\x18\x02 \x01(\v2\x0e.pb.DefinitionR\x05value:\x028\x01\"\xad\x03\n" + + "\fCacheOptions\x120\n" + + "\x13ExportRefDeprecated\x18\x01 \x01(\tR\x13ExportRefDeprecated\x122\n" + + "\x14ImportRefsDeprecated\x18\x02 \x03(\tR\x14ImportRefsDeprecated\x12o\n" + + "\x15ExportAttrsDeprecated\x18\x03 \x03(\v29.moby.buildkit.v1.CacheOptions.ExportAttrsDeprecatedEntryR\x15ExportAttrsDeprecated\x12=\n" + + "\aExports\x18\x04 \x03(\v2#.moby.buildkit.v1.CacheOptionsEntryR\aExports\x12=\n" + + "\aImports\x18\x05 \x03(\v2#.moby.buildkit.v1.CacheOptionsEntryR\aImports\x1aH\n" + + "\x1aExportAttrsDeprecatedEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xa7\x01\n" + + "\x11CacheOptionsEntry\x12\x12\n" + + "\x04Type\x18\x01 \x01(\tR\x04Type\x12D\n" + + "\x05Attrs\x18\x02 
\x03(\v2..moby.buildkit.v1.CacheOptionsEntry.AttrsEntryR\x05Attrs\x1a8\n" + + "\n" + + "AttrsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xb7\x01\n" + + "\rSolveResponse\x12a\n" + + "\x10ExporterResponse\x18\x01 \x03(\v25.moby.buildkit.v1.SolveResponse.ExporterResponseEntryR\x10ExporterResponse\x1aC\n" + + "\x15ExporterResponseEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"!\n" + + "\rStatusRequest\x12\x10\n" + + "\x03Ref\x18\x01 \x01(\tR\x03Ref\"\xf0\x01\n" + + "\x0eStatusResponse\x124\n" + + "\bvertexes\x18\x01 \x03(\v2\x18.moby.buildkit.v1.VertexR\bvertexes\x12:\n" + + "\bstatuses\x18\x02 \x03(\v2\x1e.moby.buildkit.v1.VertexStatusR\bstatuses\x12/\n" + + "\x04logs\x18\x03 \x03(\v2\x1b.moby.buildkit.v1.VertexLogR\x04logs\x12;\n" + + "\bwarnings\x18\x04 \x03(\v2\x1f.moby.buildkit.v1.VertexWarningR\bwarnings\"\xa3\x02\n" + + "\x06Vertex\x12\x16\n" + + "\x06digest\x18\x01 \x01(\tR\x06digest\x12\x16\n" + + "\x06inputs\x18\x02 \x03(\tR\x06inputs\x12\x12\n" + + "\x04name\x18\x03 \x01(\tR\x04name\x12\x16\n" + + "\x06cached\x18\x04 \x01(\bR\x06cached\x124\n" + + "\astarted\x18\x05 \x01(\v2\x1a.google.protobuf.TimestampR\astarted\x128\n" + + "\tcompleted\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\tcompleted\x12\x14\n" + + "\x05error\x18\a \x01(\tR\x05error\x127\n" + + "\rprogressGroup\x18\b \x01(\v2\x11.pb.ProgressGroupR\rprogressGroup\"\xa4\x02\n" + + "\fVertexStatus\x12\x0e\n" + + "\x02ID\x18\x01 \x01(\tR\x02ID\x12\x16\n" + + "\x06vertex\x18\x02 \x01(\tR\x06vertex\x12\x12\n" + + "\x04name\x18\x03 \x01(\tR\x04name\x12\x18\n" + + "\acurrent\x18\x04 \x01(\x03R\acurrent\x12\x14\n" + + "\x05total\x18\x05 \x01(\x03R\x05total\x128\n" + + "\ttimestamp\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x124\n" + + "\astarted\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\astarted\x128\n" + + "\tcompleted\x18\b \x01(\v2\x1a.google.protobuf.TimestampR\tcompleted\"\x87\x01\n" + + "\tVertexLog\x12\x16\n" + + "\x06vertex\x18\x01 \x01(\tR\x06vertex\x128\n" + + "\ttimestamp\x18\x02 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12\x16\n" + + "\x06stream\x18\x03 \x01(\x03R\x06stream\x12\x10\n" + + "\x03msg\x18\x04 \x01(\fR\x03msg\"\xc4\x01\n" + + "\rVertexWarning\x12\x16\n" + + "\x06vertex\x18\x01 \x01(\tR\x06vertex\x12\x14\n" + + "\x05level\x18\x02 \x01(\x03R\x05level\x12\x14\n" + + "\x05short\x18\x03 \x01(\fR\x05short\x12\x16\n" + + "\x06detail\x18\x04 \x03(\fR\x06detail\x12\x10\n" + + "\x03url\x18\x05 \x01(\tR\x03url\x12\"\n" + + "\x04info\x18\x06 \x01(\v2\x0e.pb.SourceInfoR\x04info\x12!\n" + + "\x06ranges\x18\a \x03(\v2\t.pb.RangeR\x06ranges\"\"\n" + + "\fBytesMessage\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04data\",\n" + + "\x12ListWorkersRequest\x12\x16\n" + + "\x06filter\x18\x01 \x03(\tR\x06filter\"S\n" + + "\x13ListWorkersResponse\x12<\n" + + "\x06record\x18\x01 \x03(\v2$.moby.buildkit.v1.types.WorkerRecordR\x06record\"\r\n" + + "\vInfoRequest\"a\n" + + "\fInfoResponse\x12Q\n" + + "\x0fbuildkitVersion\x18\x01 \x01(\v2'.moby.buildkit.v1.types.BuildkitVersionR\x0fbuildkitVersion\"\x93\x01\n" + + "\x13BuildHistoryRequest\x12\x1e\n" + + "\n" + + "ActiveOnly\x18\x01 \x01(\bR\n" + + "ActiveOnly\x12\x10\n" + + "\x03Ref\x18\x02 \x01(\tR\x03Ref\x12\x1c\n" + + "\tEarlyExit\x18\x03 \x01(\bR\tEarlyExit\x12\x16\n" + + "\x06Filter\x18\x04 \x03(\tR\x06Filter\x12\x14\n" + + "\x05Limit\x18\x05 \x01(\x05R\x05Limit\"\x8e\x01\n" + + "\x11BuildHistoryEvent\x12;\n" 
+ + "\x04type\x18\x01 \x01(\x0e2'.moby.buildkit.v1.BuildHistoryEventTypeR\x04type\x12<\n" + + "\x06record\x18\x02 \x01(\v2$.moby.buildkit.v1.BuildHistoryRecordR\x06record\"\xd3\t\n" + + "\x12BuildHistoryRecord\x12\x10\n" + + "\x03Ref\x18\x01 \x01(\tR\x03Ref\x12\x1a\n" + + "\bFrontend\x18\x02 \x01(\tR\bFrontend\x12]\n" + + "\rFrontendAttrs\x18\x03 \x03(\v27.moby.buildkit.v1.BuildHistoryRecord.FrontendAttrsEntryR\rFrontendAttrs\x128\n" + + "\tExporters\x18\x04 \x03(\v2\x1a.moby.buildkit.v1.ExporterR\tExporters\x12(\n" + + "\x05error\x18\x05 \x01(\v2\x12.google.rpc.StatusR\x05error\x128\n" + + "\tCreatedAt\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\tCreatedAt\x12<\n" + + "\vCompletedAt\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\vCompletedAt\x120\n" + + "\x04logs\x18\b \x01(\v2\x1c.moby.buildkit.v1.DescriptorR\x04logs\x12f\n" + + "\x10ExporterResponse\x18\t \x03(\v2:.moby.buildkit.v1.BuildHistoryRecord.ExporterResponseEntryR\x10ExporterResponse\x129\n" + + "\x06Result\x18\n" + + " \x01(\v2!.moby.buildkit.v1.BuildResultInfoR\x06Result\x12K\n" + + "\aResults\x18\v \x03(\v21.moby.buildkit.v1.BuildHistoryRecord.ResultsEntryR\aResults\x12\x1e\n" + + "\n" + + "Generation\x18\f \x01(\x05R\n" + + "Generation\x122\n" + + "\x05trace\x18\r \x01(\v2\x1c.moby.buildkit.v1.DescriptorR\x05trace\x12\x16\n" + + "\x06pinned\x18\x0e \x01(\bR\x06pinned\x12&\n" + + "\x0enumCachedSteps\x18\x0f \x01(\x05R\x0enumCachedSteps\x12$\n" + + "\rnumTotalSteps\x18\x10 \x01(\x05R\rnumTotalSteps\x12,\n" + + "\x11numCompletedSteps\x18\x11 \x01(\x05R\x11numCompletedSteps\x12B\n" + + "\rexternalError\x18\x12 \x01(\v2\x1c.moby.buildkit.v1.DescriptorR\rexternalError\x12 \n" + + "\vnumWarnings\x18\x13 \x01(\x05R\vnumWarnings\x1a@\n" + + "\x12FrontendAttrsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\x1aC\n" + + "\x15ExporterResponseEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\x1a]\n" + + "\fResultsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x127\n" + + "\x05value\x18\x02 \x01(\v2!.moby.buildkit.v1.BuildResultInfoR\x05value:\x028\x01\"y\n" + + "\x19UpdateBuildHistoryRequest\x12\x10\n" + + "\x03Ref\x18\x01 \x01(\tR\x03Ref\x12\x16\n" + + "\x06Pinned\x18\x02 \x01(\bR\x06Pinned\x12\x16\n" + + "\x06Delete\x18\x03 \x01(\bR\x06Delete\x12\x1a\n" + + "\bFinalize\x18\x04 \x01(\bR\bFinalize\"\x1c\n" + + "\x1aUpdateBuildHistoryResponse\"\xe8\x01\n" + + "\n" + + "Descriptor\x12\x1d\n" + + "\n" + + "media_type\x18\x01 \x01(\tR\tmediaType\x12\x16\n" + + "\x06digest\x18\x02 \x01(\tR\x06digest\x12\x12\n" + + "\x04size\x18\x03 \x01(\x03R\x04size\x12O\n" + + "\vannotations\x18\x05 \x03(\v2-.moby.buildkit.v1.Descriptor.AnnotationsEntryR\vannotations\x1a>\n" + + "\x10AnnotationsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xc1\x02\n" + + "\x0fBuildResultInfo\x12H\n" + + "\x10ResultDeprecated\x18\x01 \x01(\v2\x1c.moby.buildkit.v1.DescriptorR\x10ResultDeprecated\x12@\n" + + "\fAttestations\x18\x02 \x03(\v2\x1c.moby.buildkit.v1.DescriptorR\fAttestations\x12H\n" + + "\aResults\x18\x03 \x03(\v2..moby.buildkit.v1.BuildResultInfo.ResultsEntryR\aResults\x1aX\n" + + "\fResultsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\x03R\x03key\x122\n" + + "\x05value\x18\x02 \x01(\v2\x1c.moby.buildkit.v1.DescriptorR\x05value:\x028\x01\"\x95\x01\n" + + "\bExporter\x12\x12\n" + + "\x04Type\x18\x01 \x01(\tR\x04Type\x12;\n" + + "\x05Attrs\x18\x02 
\x03(\v2%.moby.buildkit.v1.Exporter.AttrsEntryR\x05Attrs\x1a8\n" + + "\n" + + "AttrsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01*?\n" + + "\x15BuildHistoryEventType\x12\v\n" + + "\aSTARTED\x10\x00\x12\f\n" + + "\bCOMPLETE\x10\x01\x12\v\n" + + "\aDELETED\x10\x022\x89\x06\n" + + "\aControl\x12T\n" + + "\tDiskUsage\x12\".moby.buildkit.v1.DiskUsageRequest\x1a#.moby.buildkit.v1.DiskUsageResponse\x12H\n" + + "\x05Prune\x12\x1e.moby.buildkit.v1.PruneRequest\x1a\x1d.moby.buildkit.v1.UsageRecord0\x01\x12H\n" + + "\x05Solve\x12\x1e.moby.buildkit.v1.SolveRequest\x1a\x1f.moby.buildkit.v1.SolveResponse\x12M\n" + + "\x06Status\x12\x1f.moby.buildkit.v1.StatusRequest\x1a .moby.buildkit.v1.StatusResponse0\x01\x12M\n" + + "\aSession\x12\x1e.moby.buildkit.v1.BytesMessage\x1a\x1e.moby.buildkit.v1.BytesMessage(\x010\x01\x12Z\n" + + "\vListWorkers\x12$.moby.buildkit.v1.ListWorkersRequest\x1a%.moby.buildkit.v1.ListWorkersResponse\x12E\n" + + "\x04Info\x12\x1d.moby.buildkit.v1.InfoRequest\x1a\x1e.moby.buildkit.v1.InfoResponse\x12b\n" + + "\x12ListenBuildHistory\x12%.moby.buildkit.v1.BuildHistoryRequest\x1a#.moby.buildkit.v1.BuildHistoryEvent0\x01\x12o\n" + + "\x12UpdateBuildHistory\x12+.moby.buildkit.v1.UpdateBuildHistoryRequest\x1a,.moby.buildkit.v1.UpdateBuildHistoryResponseB@Z>github.com/moby/buildkit/api/services/control;moby_buildkit_v1b\x06proto3" var ( file_github_com_moby_buildkit_api_services_control_control_proto_rawDescOnce sync.Once - file_github_com_moby_buildkit_api_services_control_control_proto_rawDescData = file_github_com_moby_buildkit_api_services_control_control_proto_rawDesc + file_github_com_moby_buildkit_api_services_control_control_proto_rawDescData []byte ) func file_github_com_moby_buildkit_api_services_control_control_proto_rawDescGZIP() []byte { file_github_com_moby_buildkit_api_services_control_control_proto_rawDescOnce.Do(func() { - file_github_com_moby_buildkit_api_services_control_control_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_moby_buildkit_api_services_control_control_proto_rawDescData) + file_github_com_moby_buildkit_api_services_control_control_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_api_services_control_control_proto_rawDesc), len(file_github_com_moby_buildkit_api_services_control_control_proto_rawDesc))) }) return file_github_com_moby_buildkit_api_services_control_control_proto_rawDescData } @@ -2651,7 +2385,7 @@ func file_github_com_moby_buildkit_api_services_control_control_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_moby_buildkit_api_services_control_control_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_api_services_control_control_proto_rawDesc), len(file_github_com_moby_buildkit_api_services_control_control_proto_rawDesc)), NumEnums: 1, NumMessages: 39, NumExtensions: 0, @@ -2663,7 +2397,6 @@ func file_github_com_moby_buildkit_api_services_control_control_proto_init() { MessageInfos: file_github_com_moby_buildkit_api_services_control_control_proto_msgTypes, }.Build() File_github_com_moby_buildkit_api_services_control_control_proto = out.File - file_github_com_moby_buildkit_api_services_control_control_proto_rawDesc = nil file_github_com_moby_buildkit_api_services_control_control_proto_goTypes = nil 
file_github_com_moby_buildkit_api_services_control_control_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/api/services/control/control.proto b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/api/services/control/control.proto index 85da9a72a..3e47752ff 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/api/services/control/control.proto +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/api/services/control/control.proto @@ -36,6 +36,7 @@ message PruneRequest { message DiskUsageRequest { repeated string filter = 1; + int64 ageLimit = 2; } message DiskUsageResponse { @@ -74,6 +75,7 @@ message SolveRequest { bool Internal = 11; // Internal builds are not recorded in build history moby.buildkit.v1.sourcepolicy.Policy SourcePolicy = 12; repeated Exporter Exporters = 13; + bool EnableSessionExporter = 14; } message CacheOptions { diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/api/services/control/control_vtproto.pb.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/api/services/control/control_vtproto.pb.go index e4daa74ca..1d72ca07e 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/api/services/control/control_vtproto.pb.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/api/services/control/control_vtproto.pb.go @@ -56,6 +56,7 @@ func (m *DiskUsageRequest) CloneVT() *DiskUsageRequest { return (*DiskUsageRequest)(nil) } r := new(DiskUsageRequest) + r.AgeLimit = m.AgeLimit if rhs := m.Filter; rhs != nil { tmpContainer := make([]string, len(rhs)) copy(tmpContainer, rhs) @@ -140,6 +141,7 @@ func (m *SolveRequest) CloneVT() *SolveRequest { r.Cache = m.Cache.CloneVT() r.Internal = m.Internal r.SourcePolicy = m.SourcePolicy.CloneVT() + r.EnableSessionExporter = m.EnableSessionExporter if rhs := m.ExporterAttrsDeprecated; rhs != nil { tmpContainer := make(map[string]string, len(rhs)) for k, v := range rhs { @@ -830,6 +832,9 @@ func (this *DiskUsageRequest) EqualVT(that *DiskUsageRequest) bool { return false } } + if this.AgeLimit != that.AgeLimit { + return false + } return string(this.unknownFields) == string(that.unknownFields) } @@ -1031,6 +1036,9 @@ func (this *SolveRequest) EqualVT(that *SolveRequest) bool { } } } + if this.EnableSessionExporter != that.EnableSessionExporter { + return false + } return string(this.unknownFields) == string(that.unknownFields) } @@ -2015,6 +2023,11 @@ func (m *DiskUsageRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.AgeLimit != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.AgeLimit)) + i-- + dAtA[i] = 0x10 + } if len(m.Filter) > 0 { for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.Filter[iNdEx]) @@ -2232,6 +2245,16 @@ func (m *SolveRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.EnableSessionExporter { + i-- + if m.EnableSessionExporter { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + } if len(m.Exporters) > 0 { for iNdEx := len(m.Exporters) - 1; iNdEx >= 0; iNdEx-- { size, err := m.Exporters[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) @@ -3977,6 +4000,9 @@ func (m *DiskUsageRequest) SizeVT() (n int) { n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } } + if m.AgeLimit != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.AgeLimit)) + } n += len(m.unknownFields) return n } @@ -4130,6 +4156,9 @@ func (m *SolveRequest) SizeVT() (n int) { n += 1 + l + 
protohelpers.SizeOfVarint(uint64(l)) } } + if m.EnableSessionExporter { + n += 2 + } n += len(m.unknownFields) return n } @@ -4989,6 +5018,25 @@ func (m *DiskUsageRequest) UnmarshalVT(dAtA []byte) error { } m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AgeLimit", wireType) + } + m.AgeLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AgeLimit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -6211,6 +6259,26 @@ func (m *SolveRequest) UnmarshalVT(dAtA []byte) error { return err } iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EnableSessionExporter", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EnableSessionExporter = bool(v != 0) default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/api/types/worker.pb.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/api/types/worker.pb.go index b49b47d32..448548c82 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/api/types/worker.pb.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/api/types/worker.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.11.4 // source: github.com/moby/buildkit/api/types/worker.proto @@ -12,6 +12,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -22,16 +23,15 @@ const ( ) type WorkerRecord struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Labels map[string]string `protobuf:"bytes,2,rep,name=Labels,proto3" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Platforms []*pb.Platform `protobuf:"bytes,3,rep,name=platforms,proto3" json:"platforms,omitempty"` - GCPolicy []*GCPolicy `protobuf:"bytes,4,rep,name=GCPolicy,proto3" json:"GCPolicy,omitempty"` - BuildkitVersion *BuildkitVersion `protobuf:"bytes,5,opt,name=BuildkitVersion,proto3" json:"BuildkitVersion,omitempty"` - CDIDevices []*CDIDevice `protobuf:"bytes,6,rep,name=CDIDevices,proto3" json:"CDIDevices,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=Labels,proto3" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Platforms []*pb.Platform `protobuf:"bytes,3,rep,name=platforms,proto3" json:"platforms,omitempty"` + GCPolicy []*GCPolicy `protobuf:"bytes,4,rep,name=GCPolicy,proto3" json:"GCPolicy,omitempty"` + BuildkitVersion *BuildkitVersion `protobuf:"bytes,5,opt,name=BuildkitVersion,proto3" json:"BuildkitVersion,omitempty"` + CDIDevices []*CDIDevice 
`protobuf:"bytes,6,rep,name=CDIDevices,proto3" json:"CDIDevices,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *WorkerRecord) Reset() { @@ -107,17 +107,16 @@ func (x *WorkerRecord) GetCDIDevices() []*CDIDevice { } type GCPolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - All bool `protobuf:"varint,1,opt,name=all,proto3" json:"all,omitempty"` - KeepDuration int64 `protobuf:"varint,2,opt,name=keepDuration,proto3" json:"keepDuration,omitempty"` - Filters []string `protobuf:"bytes,4,rep,name=filters,proto3" json:"filters,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + All bool `protobuf:"varint,1,opt,name=all,proto3" json:"all,omitempty"` + KeepDuration int64 `protobuf:"varint,2,opt,name=keepDuration,proto3" json:"keepDuration,omitempty"` + Filters []string `protobuf:"bytes,4,rep,name=filters,proto3" json:"filters,omitempty"` // reservedSpace was renamed from freeBytes ReservedSpace int64 `protobuf:"varint,3,opt,name=reservedSpace,proto3" json:"reservedSpace,omitempty"` MaxUsedSpace int64 `protobuf:"varint,5,opt,name=maxUsedSpace,proto3" json:"maxUsedSpace,omitempty"` MinFreeSpace int64 `protobuf:"varint,6,opt,name=minFreeSpace,proto3" json:"minFreeSpace,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GCPolicy) Reset() { @@ -193,13 +192,12 @@ func (x *GCPolicy) GetMinFreeSpace() int64 { } type BuildkitVersion struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Package string `protobuf:"bytes,1,opt,name=package,proto3" json:"package,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + Revision string `protobuf:"bytes,3,opt,name=revision,proto3" json:"revision,omitempty"` unknownFields protoimpl.UnknownFields - - Package string `protobuf:"bytes,1,opt,name=package,proto3" json:"package,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - Revision string `protobuf:"bytes,3,opt,name=revision,proto3" json:"revision,omitempty"` + sizeCache protoimpl.SizeCache } func (x *BuildkitVersion) Reset() { @@ -254,14 +252,13 @@ func (x *BuildkitVersion) GetRevision() string { } type CDIDevice struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + AutoAllow bool `protobuf:"varint,2,opt,name=AutoAllow,proto3" json:"AutoAllow,omitempty"` + Annotations map[string]string `protobuf:"bytes,3,rep,name=Annotations,proto3" json:"Annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + OnDemand bool `protobuf:"varint,4,opt,name=OnDemand,proto3" json:"OnDemand,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - AutoAllow bool `protobuf:"varint,2,opt,name=AutoAllow,proto3" json:"AutoAllow,omitempty"` - Annotations map[string]string `protobuf:"bytes,3,rep,name=Annotations,proto3" json:"Annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - OnDemand bool `protobuf:"varint,4,opt,name=OnDemand,proto3" json:"OnDemand,omitempty"` + sizeCache protoimpl.SizeCache } func (x *CDIDevice) Reset() { @@ -324,90 +321,49 @@ func (x *CDIDevice) GetOnDemand() bool { 
var File_github_com_moby_buildkit_api_types_worker_proto protoreflect.FileDescriptor -var file_github_com_moby_buildkit_api_types_worker_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, - 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x16, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, - 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x1a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2f, 0x70, 0x62, 0x2f, 0x6f, 0x70, - 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa3, 0x03, 0x0a, 0x0c, 0x57, 0x6f, 0x72, 0x6b, - 0x65, 0x72, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x48, 0x0a, 0x06, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x2e, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x4c, 0x61, 0x62, 0x65, - 0x6c, 0x73, 0x12, 0x2a, 0x0a, 0x09, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, - 0x6f, 0x72, 0x6d, 0x52, 0x09, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x12, 0x3c, - 0x0a, 0x08, 0x47, 0x43, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x20, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, - 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x47, 0x43, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x52, 0x08, 0x47, 0x43, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x51, 0x0a, 0x0f, - 0x42, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x42, - 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x0f, - 0x42, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x41, 0x0a, 0x0a, 0x43, 0x44, 0x49, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x43, 0x44, 0x49, - 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0a, 0x43, 0x44, 0x49, 0x44, 0x65, 0x76, 0x69, 0x63, - 0x65, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc8, 0x01, - 0x0a, 0x08, 0x47, 0x43, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 
0x12, 0x10, 0x0a, 0x03, 0x61, 0x6c, - 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, 0x6c, 0x6c, 0x12, 0x22, 0x0a, 0x0c, - 0x6b, 0x65, 0x65, 0x70, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x0c, 0x6b, 0x65, 0x65, 0x70, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x18, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x72, 0x65, - 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, - 0x12, 0x22, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x55, 0x73, 0x65, 0x64, 0x53, 0x70, 0x61, 0x63, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x55, 0x73, 0x65, 0x64, 0x53, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x6d, 0x69, 0x6e, 0x46, 0x72, 0x65, 0x65, 0x53, - 0x70, 0x61, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x6d, 0x69, 0x6e, 0x46, - 0x72, 0x65, 0x65, 0x53, 0x70, 0x61, 0x63, 0x65, 0x22, 0x61, 0x0a, 0x0f, 0x42, 0x75, 0x69, 0x6c, - 0x64, 0x6b, 0x69, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x70, - 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, - 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xef, 0x01, 0x0a, 0x09, - 0x43, 0x44, 0x49, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, - 0x09, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x09, 0x41, 0x75, 0x74, 0x6f, 0x41, 0x6c, 0x6c, 0x6f, 0x77, 0x12, 0x54, 0x0a, 0x0b, 0x41, - 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x32, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, - 0x2e, 0x76, 0x31, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x43, 0x44, 0x49, 0x44, 0x65, 0x76, - 0x69, 0x63, 0x65, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x4f, 0x6e, 0x44, 0x65, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x08, 0x4f, 0x6e, 0x44, 0x65, 0x6d, 0x61, 0x6e, 0x64, 0x1a, 0x3e, 0x0a, - 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x3b, 0x5a, - 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, - 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x3b, 0x6d, 0x6f, 0x62, 0x79, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 
0x6b, 0x69, - 0x74, 0x5f, 0x76, 0x31, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} +const file_github_com_moby_buildkit_api_types_worker_proto_rawDesc = "" + + "\n" + + "/github.com/moby/buildkit/api/types/worker.proto\x12\x16moby.buildkit.v1.types\x1a,github.com/moby/buildkit/solver/pb/ops.proto\"\xa3\x03\n" + + "\fWorkerRecord\x12\x0e\n" + + "\x02ID\x18\x01 \x01(\tR\x02ID\x12H\n" + + "\x06Labels\x18\x02 \x03(\v20.moby.buildkit.v1.types.WorkerRecord.LabelsEntryR\x06Labels\x12*\n" + + "\tplatforms\x18\x03 \x03(\v2\f.pb.PlatformR\tplatforms\x12<\n" + + "\bGCPolicy\x18\x04 \x03(\v2 .moby.buildkit.v1.types.GCPolicyR\bGCPolicy\x12Q\n" + + "\x0fBuildkitVersion\x18\x05 \x01(\v2'.moby.buildkit.v1.types.BuildkitVersionR\x0fBuildkitVersion\x12A\n" + + "\n" + + "CDIDevices\x18\x06 \x03(\v2!.moby.buildkit.v1.types.CDIDeviceR\n" + + "CDIDevices\x1a9\n" + + "\vLabelsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xc8\x01\n" + + "\bGCPolicy\x12\x10\n" + + "\x03all\x18\x01 \x01(\bR\x03all\x12\"\n" + + "\fkeepDuration\x18\x02 \x01(\x03R\fkeepDuration\x12\x18\n" + + "\afilters\x18\x04 \x03(\tR\afilters\x12$\n" + + "\rreservedSpace\x18\x03 \x01(\x03R\rreservedSpace\x12\"\n" + + "\fmaxUsedSpace\x18\x05 \x01(\x03R\fmaxUsedSpace\x12\"\n" + + "\fminFreeSpace\x18\x06 \x01(\x03R\fminFreeSpace\"a\n" + + "\x0fBuildkitVersion\x12\x18\n" + + "\apackage\x18\x01 \x01(\tR\apackage\x12\x18\n" + + "\aversion\x18\x02 \x01(\tR\aversion\x12\x1a\n" + + "\brevision\x18\x03 \x01(\tR\brevision\"\xef\x01\n" + + "\tCDIDevice\x12\x12\n" + + "\x04Name\x18\x01 \x01(\tR\x04Name\x12\x1c\n" + + "\tAutoAllow\x18\x02 \x01(\bR\tAutoAllow\x12T\n" + + "\vAnnotations\x18\x03 \x03(\v22.moby.buildkit.v1.types.CDIDevice.AnnotationsEntryR\vAnnotations\x12\x1a\n" + + "\bOnDemand\x18\x04 \x01(\bR\bOnDemand\x1a>\n" + + "\x10AnnotationsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01B;Z9github.com/moby/buildkit/api/types;moby_buildkit_v1_typesb\x06proto3" var ( file_github_com_moby_buildkit_api_types_worker_proto_rawDescOnce sync.Once - file_github_com_moby_buildkit_api_types_worker_proto_rawDescData = file_github_com_moby_buildkit_api_types_worker_proto_rawDesc + file_github_com_moby_buildkit_api_types_worker_proto_rawDescData []byte ) func file_github_com_moby_buildkit_api_types_worker_proto_rawDescGZIP() []byte { file_github_com_moby_buildkit_api_types_worker_proto_rawDescOnce.Do(func() { - file_github_com_moby_buildkit_api_types_worker_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_moby_buildkit_api_types_worker_proto_rawDescData) + file_github_com_moby_buildkit_api_types_worker_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_api_types_worker_proto_rawDesc), len(file_github_com_moby_buildkit_api_types_worker_proto_rawDesc))) }) return file_github_com_moby_buildkit_api_types_worker_proto_rawDescData } @@ -445,7 +401,7 @@ func file_github_com_moby_buildkit_api_types_worker_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_moby_buildkit_api_types_worker_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_api_types_worker_proto_rawDesc), len(file_github_com_moby_buildkit_api_types_worker_proto_rawDesc)), NumEnums: 0, NumMessages: 6, NumExtensions: 0, @@ -456,7 
+412,6 @@ func file_github_com_moby_buildkit_api_types_worker_proto_init() { MessageInfos: file_github_com_moby_buildkit_api_types_worker_proto_msgTypes, }.Build() File_github_com_moby_buildkit_api_types_worker_proto = out.File - file_github_com_moby_buildkit_api_types_worker_proto_rawDesc = nil file_github_com_moby_buildkit_api_types_worker_proto_goTypes = nil file_github_com_moby_buildkit_api_types_worker_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/build.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/build.go index c27c1a60c..93787ba4f 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/build.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/build.go @@ -53,7 +53,8 @@ func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildF return err } - gwClient.caps = g.BuildOpts().Caps + caps := g.BuildOpts().Caps + gwClient.caps = &caps if err := g.Run(ctx, buildFunc); err != nil { return errors.Wrap(err, "failed to run Build function") @@ -72,10 +73,14 @@ func (c *Client) gatewayClientForBuild(buildid string) *gatewayClientForBuild { } } +func (c *Client) GatewayClientForBuild(buildid string) gatewayapi.LLBBridgeClient { + return c.gatewayClientForBuild(buildid) +} + type gatewayClientForBuild struct { gateway gatewayapi.LLBBridgeClient buildID string - caps apicaps.CapSet + caps *apicaps.CapSet } func (g *gatewayClientForBuild) ResolveImageConfig(ctx context.Context, in *gatewayapi.ResolveImageConfigRequest, opts ...grpc.CallOption) (*gatewayapi.ResolveImageConfigResponse, error) { @@ -99,29 +104,35 @@ func (g *gatewayClientForBuild) ReadFile(ctx context.Context, in *gatewayapi.Rea } func (g *gatewayClientForBuild) ReadDir(ctx context.Context, in *gatewayapi.ReadDirRequest, opts ...grpc.CallOption) (*gatewayapi.ReadDirResponse, error) { - if err := g.caps.Supports(gatewayapi.CapReadDir); err != nil { - return nil, err + if g.caps != nil { + if err := g.caps.Supports(gatewayapi.CapReadDir); err != nil { + return nil, err + } } ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) return g.gateway.ReadDir(ctx, in, opts...) } func (g *gatewayClientForBuild) StatFile(ctx context.Context, in *gatewayapi.StatFileRequest, opts ...grpc.CallOption) (*gatewayapi.StatFileResponse, error) { - if err := g.caps.Supports(gatewayapi.CapStatFile); err != nil { - return nil, err + if g.caps != nil { + if err := g.caps.Supports(gatewayapi.CapStatFile); err != nil { + return nil, err + } } ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) return g.gateway.StatFile(ctx, in, opts...) } func (g *gatewayClientForBuild) Evaluate(ctx context.Context, in *gatewayapi.EvaluateRequest, opts ...grpc.CallOption) (*gatewayapi.EvaluateResponse, error) { - if err := g.caps.Supports(gatewayapi.CapGatewayEvaluate); err != nil { - if err2 := g.caps.Supports(gatewayapi.CapStatFile); err2 != nil { - return nil, err + if g.caps != nil { + if err := g.caps.Supports(gatewayapi.CapGatewayEvaluate); err != nil { + if err2 := g.caps.Supports(gatewayapi.CapStatFile); err2 != nil { + return nil, err + } + ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) + _, err := g.gateway.StatFile(ctx, &gatewayapi.StatFileRequest{Ref: in.Ref, Path: "."}, opts...) + return &gatewayapi.EvaluateResponse{}, err } - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - _, err := g.gateway.StatFile(ctx, &gatewayapi.StatFileRequest{Ref: in.Ref, Path: "."}, opts...) 
- return &gatewayapi.EvaluateResponse{}, err } ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) return g.gateway.Evaluate(ctx, in, opts...) @@ -138,32 +149,40 @@ func (g *gatewayClientForBuild) Return(ctx context.Context, in *gatewayapi.Retur } func (g *gatewayClientForBuild) Inputs(ctx context.Context, in *gatewayapi.InputsRequest, opts ...grpc.CallOption) (*gatewayapi.InputsResponse, error) { - if err := g.caps.Supports(gatewayapi.CapFrontendInputs); err != nil { - return nil, err + if g.caps != nil { + if err := g.caps.Supports(gatewayapi.CapFrontendInputs); err != nil { + return nil, err + } } ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) return g.gateway.Inputs(ctx, in, opts...) } func (g *gatewayClientForBuild) NewContainer(ctx context.Context, in *gatewayapi.NewContainerRequest, opts ...grpc.CallOption) (*gatewayapi.NewContainerResponse, error) { - if err := g.caps.Supports(gatewayapi.CapGatewayExec); err != nil { - return nil, err + if g.caps != nil { + if err := g.caps.Supports(gatewayapi.CapGatewayExec); err != nil { + return nil, err + } } ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) return g.gateway.NewContainer(ctx, in, opts...) } func (g *gatewayClientForBuild) ReleaseContainer(ctx context.Context, in *gatewayapi.ReleaseContainerRequest, opts ...grpc.CallOption) (*gatewayapi.ReleaseContainerResponse, error) { - if err := g.caps.Supports(gatewayapi.CapGatewayExec); err != nil { - return nil, err + if g.caps != nil { + if err := g.caps.Supports(gatewayapi.CapGatewayExec); err != nil { + return nil, err + } } ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) return g.gateway.ReleaseContainer(ctx, in, opts...) } func (g *gatewayClientForBuild) ExecProcess(ctx context.Context, opts ...grpc.CallOption) (gatewayapi.LLBBridge_ExecProcessClient, error) { - if err := g.caps.Supports(gatewayapi.CapGatewayExec); err != nil { - return nil, err + if g.caps != nil { + if err := g.caps.Supports(gatewayapi.CapGatewayExec); err != nil { + return nil, err + } } ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) return g.gateway.ExecProcess(ctx, opts...) diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/client.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/client.go index 2ca45ca5f..ff3b6669b 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/client.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/client.go @@ -151,7 +151,8 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error gopts = append(gopts, grpc.WithStreamInterceptor(grpcerrors.StreamClientInterceptor)) gopts = append(gopts, customDialOptions...) - //nolint:staticcheck // ignore SA1019 NewClient has different behavior and needs to be tested + // ignore SA1019 NewClient has different behavior and needs to be tested + //nolint:staticcheck conn, err := grpc.DialContext(ctx, address, gopts...) if err != nil { return nil, errors.Wrapf(err, "failed to dial %q . 
make sure buildkitd is running", address) diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/diskusage.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/diskusage.go index a83815f4f..2471ac297 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/diskusage.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/diskusage.go @@ -1,8 +1,9 @@ package client import ( + "cmp" "context" - "sort" + "slices" "time" controlapi "github.com/moby/buildkit/api/services/control" @@ -30,7 +31,7 @@ func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*Usa o.SetDiskUsageOption(info) } - req := &controlapi.DiskUsageRequest{Filter: info.Filter} + req := &controlapi.DiskUsageRequest{Filter: info.Filter, AgeLimit: int64(info.AgeLimit)} resp, err := c.ControlClient().DiskUsage(ctx, req) if err != nil { return nil, errors.Wrap(err, "failed to call diskusage") @@ -60,13 +61,9 @@ func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*Usa }) } - sort.Slice(du, func(i, j int) bool { - if du[i].Size == du[j].Size { - return du[i].ID > du[j].ID - } - return du[i].Size > du[j].Size + slices.SortFunc(du, func(a, b *UsageInfo) int { + return cmp.Or(cmp.Compare(a.Size, b.Size), cmp.Compare(a.ID, b.ID)) }) - return du, nil } @@ -75,7 +72,8 @@ type DiskUsageOption interface { } type DiskUsageInfo struct { - Filter []string + Filter []string + AgeLimit time.Duration } type UsageRecordType string @@ -88,3 +86,15 @@ const ( UsageRecordTypeCacheMount UsageRecordType = "exec.cachemount" UsageRecordTypeRegular UsageRecordType = "regular" ) + +type diskUsageOptionFunc func(*DiskUsageInfo) + +func (f diskUsageOptionFunc) SetDiskUsageOption(info *DiskUsageInfo) { + f(info) +} + +func WithAgeLimit(age time.Duration) DiskUsageOption { + return diskUsageOptionFunc(func(info *DiskUsageInfo) { + info.AgeLimit = age + }) +} diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/definition.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/definition.go index 3c9ee7320..73c8097ab 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/definition.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/definition.go @@ -240,7 +240,7 @@ func (d *DefinitionOp) Inputs() []Output { d.mu.Unlock() inputs = append(inputs, &output{vertex: vtx, platform: platform, getIndex: func() (pb.OutputIndex, error) { - return pb.OutputIndex(vtx.index), nil + return vtx.index, nil }}) } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/exec.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/exec.go index ba66e7332..cd86f6f7c 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/exec.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/exec.go @@ -5,7 +5,7 @@ import ( _ "crypto/sha256" // for opencontainers/go-digest "fmt" "net" - "sort" + "slices" "strings" "github.com/moby/buildkit/solver/pb" @@ -143,8 +143,8 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] return "", nil, nil, nil, err } // make sure mounts are sorted - sort.Slice(e.mounts, func(i, j int) bool { - return e.mounts[i].target < e.mounts[j].target + slices.SortFunc(e.mounts, func(a, b *mount) int { + return strings.Compare(a.target, b.target) }) env, err := getEnv(e.base)(ctx, c) @@ -170,7 +170,10 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] } else if e.constraints.Platform != nil { os = 
e.constraints.Platform.OS } - env = env.SetDefault("PATH", system.DefaultPathEnv(os)) + // don't set PATH on Windows. #5445 + if os != "windows" { + env = env.SetDefault("PATH", system.DefaultPathEnv(os)) + } } else { addCap(&e.constraints, pb.CapExecMetaSetsDefaultPath) } @@ -477,10 +480,9 @@ func (e *ExecOp) Inputs() (inputs []Output) { // make sure mounts are sorted // the same sort occurs in (*ExecOp).Marshal, and this // sort must be the same - sort.Slice(e.mounts, func(i int, j int) bool { - return e.mounts[i].target < e.mounts[j].target + slices.SortFunc(e.mounts, func(a, b *mount) int { + return strings.Compare(a.target, b.target) }) - seen := map[Output]struct{}{} for _, m := range e.mounts { if m.source != nil { @@ -497,8 +499,8 @@ func (e *ExecOp) Inputs() (inputs []Output) { func (e *ExecOp) getMountIndexFn(m *mount) func() (pb.OutputIndex, error) { return func() (pb.OutputIndex, error) { // make sure mounts are sorted - sort.Slice(e.mounts, func(i, j int) bool { - return e.mounts[i].target < e.mounts[j].target + slices.SortFunc(e.mounts, func(a, b *mount) int { + return strings.Compare(a.target, b.target) }) i := 0 @@ -614,7 +616,7 @@ func Shlex(str string) RunOption { }) } -func Shlexf(str string, v ...interface{}) RunOption { +func Shlexf(str string, v ...any) RunOption { return runOptionFunc(func(ei *ExecInfo) { ei.State = shlexf(str, true, v...)(ei.State) }) diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/marshal.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/marshal.go index d23a83b11..d44703110 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/marshal.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/marshal.go @@ -2,6 +2,7 @@ package llb import ( "io" + "slices" "sync" cerrdefs "github.com/containerd/errdefs" @@ -84,7 +85,7 @@ func ReadFrom(r io.Reader) (*Definition, error) { func MarshalConstraints(base, override *Constraints) (*pb.Op, *pb.OpMetadata) { c := *base - c.WorkerConstraints = append([]string{}, c.WorkerConstraints...) + c.WorkerConstraints = slices.Clone(c.WorkerConstraints) if p := override.Platform; p != nil { c.Platform = p @@ -105,7 +106,7 @@ func MarshalConstraints(base, override *Constraints) (*pb.Op, *pb.OpMetadata) { OSVersion: c.Platform.OSVersion, } if c.Platform.OSFeatures != nil { - opPlatform.OSFeatures = append([]string{}, c.Platform.OSFeatures...) + opPlatform.OSFeatures = slices.Clone(c.Platform.OSFeatures) } return &pb.Op{ diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/meta.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/meta.go index 640be7478..c25f3db50 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/meta.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/meta.go @@ -35,7 +35,7 @@ var ( // AddEnvf is the same as [AddEnv] but allows for a format string. // This is the equivalent of `[State.AddEnvf]` -func AddEnvf(key, value string, v ...interface{}) StateOption { +func AddEnvf(key, value string, v ...any) StateOption { return addEnvf(key, value, true, v...) } @@ -46,12 +46,12 @@ func AddEnv(key, value string) StateOption { return addEnvf(key, value, false) } -func addEnvf(key, value string, replace bool, v ...interface{}) StateOption { +func addEnvf(key, value string, replace bool, v ...any) StateOption { if replace { value = fmt.Sprintf(value, v...) 
} return func(s State) State { - return s.withValue(keyEnv, func(ctx context.Context, c *Constraints) (interface{}, error) { + return s.withValue(keyEnv, func(ctx context.Context, c *Constraints) (any, error) { env, err := getEnv(s)(ctx, c) if err != nil { return nil, err @@ -69,16 +69,16 @@ func Dir(str string) StateOption { } // Dirf is the same as [Dir] but allows for a format string. -func Dirf(str string, v ...interface{}) StateOption { +func Dirf(str string, v ...any) StateOption { return dirf(str, true, v...) } -func dirf(value string, replace bool, v ...interface{}) StateOption { +func dirf(value string, replace bool, v ...any) StateOption { if replace { value = fmt.Sprintf(value, v...) } return func(s State) State { - return s.withValue(keyDir, func(ctx context.Context, c *Constraints) (interface{}, error) { + return s.withValue(keyDir, func(ctx context.Context, c *Constraints) (any, error) { if !path.IsAbs(value) { prev, err := getDir(s)(ctx, c) if err != nil { @@ -213,7 +213,7 @@ func args(args ...string) StateOption { } } -func shlexf(str string, replace bool, v ...interface{}) StateOption { +func shlexf(str string, replace bool, v ...any) StateOption { if replace { str = fmt.Sprintf(str, v...) } @@ -248,7 +248,7 @@ func getPlatform(s State) func(context.Context, *Constraints) (*ocispecs.Platfor func extraHost(host string, ip net.IP) StateOption { return func(s State) State { - return s.withValue(keyExtraHost, func(ctx context.Context, c *Constraints) (interface{}, error) { + return s.withValue(keyExtraHost, func(ctx context.Context, c *Constraints) (any, error) { v, err := getExtraHosts(s)(ctx, c) if err != nil { return nil, err @@ -278,7 +278,7 @@ type HostIP struct { func ulimit(name UlimitName, soft int64, hard int64) StateOption { return func(s State) State { - return s.withValue(keyUlimit, func(ctx context.Context, c *Constraints) (interface{}, error) { + return s.withValue(keyUlimit, func(ctx context.Context, c *Constraints) (any, error) { v, err := getUlimit(s)(ctx, c) if err != nil { return nil, err diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/source.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/source.go index b08027e1d..d6eba0758 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/source.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/source.go @@ -136,7 +136,7 @@ func Image(ref string, opts ...ImageOption) State { } else if info.metaResolver != nil { if _, ok := r.(reference.Digested); ok || !info.resolveDigest { return NewState(src.Output()).Async(func(ctx context.Context, st State, c *Constraints) (State, error) { - p := info.Constraints.Platform + p := info.Platform if p == nil { p = c.Platform } @@ -153,7 +153,7 @@ func Image(ref string, opts ...ImageOption) State { }) } return Scratch().Async(func(ctx context.Context, _ State, c *Constraints) (State, error) { - p := info.Constraints.Platform + p := info.Platform if p == nil { p = c.Platform } @@ -322,6 +322,12 @@ func Git(url, ref string, opts ...GitOption) State { addCap(&gi.Constraints, pb.CapSourceGitMountSSHSock) } + checksum := gi.Checksum + if checksum != "" { + attrs[pb.AttrGitChecksum] = checksum + addCap(&gi.Constraints, pb.CapSourceGitChecksum) + } + addCap(&gi.Constraints, pb.CapSourceGit) source := NewSource("git://"+id, attrs, gi.Constraints) @@ -345,6 +351,7 @@ type GitInfo struct { addAuthCap bool KnownSSHHosts string MountSSHSock string + Checksum string } func KeepGitDir() GitOption { @@ -360,13 +367,6 @@ 
func AuthTokenSecret(v string) GitOption { }) } -func AuthHeaderSecret(v string) GitOption { - return gitOptionFunc(func(gi *GitInfo) { - gi.AuthHeaderSecret = v - gi.addAuthCap = true - }) -} - func KnownSSHHosts(key string) GitOption { key = strings.TrimSuffix(key, "\n") return gitOptionFunc(func(gi *GitInfo) { @@ -380,6 +380,35 @@ func MountSSHSock(sshID string) GitOption { }) } +func GitChecksum(v string) GitOption { + return gitOptionFunc(func(gi *GitInfo) { + gi.Checksum = v + }) +} + +// AuthOption can be used with either HTTP or Git sources. +type AuthOption interface { + GitOption + HTTPOption +} + +// AuthHeaderSecret returns an AuthOption that defines the name of a +// secret to use for HTTP based authentication. +func AuthHeaderSecret(secretName string) AuthOption { + return struct { + GitOption + HTTPOption + }{ + GitOption: gitOptionFunc(func(gi *GitInfo) { + gi.AuthHeaderSecret = secretName + gi.addAuthCap = true + }), + HTTPOption: httpOptionFunc(func(hi *HTTPInfo) { + hi.AuthHeaderSecret = secretName + }), + } +} + // Scratch returns a state that represents an empty filesystem. func Scratch() State { return NewState(nil) @@ -419,6 +448,13 @@ func Local(name string, opts ...LocalOption) State { addCap(&gi.Constraints, pb.CapSourceLocalDiffer) } } + if gi.MetadataOnlyCollector { + attrs[pb.AttrMetadataTransfer] = "true" + if gi.MetadataOnlyExceptions != "" { + attrs[pb.AttrMetadataTransferExclude] = gi.MetadataOnlyExceptions + } + addCap(&gi.Constraints, pb.CapSourceMetadataTransfer) + } addCap(&gi.Constraints, pb.CapSourceLocal) @@ -490,6 +526,18 @@ func Differ(t DiffType, required bool) LocalOption { }) } +func MetadataOnlyTransfer(exceptions []string) LocalOption { + return localOptionFunc(func(li *LocalInfo) { + li.MetadataOnlyCollector = true + if len(exceptions) == 0 { + li.MetadataOnlyExceptions = "" + } else { + dt, _ := json.Marshal(exceptions) // empty on error + li.MetadataOnlyExceptions = string(dt) + } + }) +} + func OCILayout(ref string, opts ...OCILayoutOption) State { gi := &OCILayoutInfo{} @@ -562,12 +610,14 @@ type DifferInfo struct { type LocalInfo struct { constraintsWrapper - SessionID string - IncludePatterns string - ExcludePatterns string - FollowPaths string - SharedKeyHint string - Differ DifferInfo + SessionID string + IncludePatterns string + ExcludePatterns string + FollowPaths string + SharedKeyHint string + Differ DifferInfo + MetadataOnlyCollector bool + MetadataOnlyExceptions string } func HTTP(url string, opts ...HTTPOption) State { @@ -595,6 +645,14 @@ func HTTP(url string, opts ...HTTPOption) State { attrs[pb.AttrHTTPGID] = strconv.Itoa(hi.GID) addCap(&hi.Constraints, pb.CapSourceHTTPUIDGID) } + if hi.AuthHeaderSecret != "" { + attrs[pb.AttrHTTPAuthHeaderSecret] = hi.AuthHeaderSecret + addCap(&hi.Constraints, pb.CapSourceHTTPAuth) + } + if hi.Header != nil { + hi.Header.setAttrs(attrs) + addCap(&hi.Constraints, pb.CapSourceHTTPHeader) + } addCap(&hi.Constraints, pb.CapSourceHTTP) source := NewSource(url, attrs, hi.Constraints) @@ -603,11 +661,13 @@ func HTTP(url string, opts ...HTTPOption) State { type HTTPInfo struct { constraintsWrapper - Checksum digest.Digest - Filename string - Perm int - UID int - GID int + Checksum digest.Digest + Filename string + Perm int + UID int + GID int + AuthHeaderSecret string + Header *HTTPHeader } type HTTPOption interface { @@ -645,6 +705,33 @@ func Chown(uid, gid int) HTTPOption { }) } +// Header returns an [HTTPOption] that ensures additional request headers will +// be sent when retrieving the HTTP 
source. +func Header(header HTTPHeader) HTTPOption { + return httpOptionFunc(func(hi *HTTPInfo) { + hi.Header = &header + }) +} + +type HTTPHeader struct { + Accept string + UserAgent string +} + +func (hh *HTTPHeader) setAttrs(attrs map[string]string) { + if hh.Accept != "" { + attrs[hh.attr("accept")] = hh.Accept + } + + if hh.UserAgent != "" { + attrs[hh.attr("user-agent")] = hh.UserAgent + } +} + +func (hh *HTTPHeader) attr(name string) string { + return pb.AttrHTTPHeaderPrefix + name +} + func platformSpecificSource(id string) bool { return strings.HasPrefix(id, "docker-image://") || strings.HasPrefix(id, "oci-layout://") } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/state.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/state.go index 5743a31e3..e63fb8667 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/state.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/llb/state.go @@ -6,6 +6,7 @@ import ( "fmt" "maps" "net" + "slices" "strings" "github.com/containerd/platforms" @@ -59,8 +60,8 @@ func NewState(o Output) State { type State struct { out Output prev *State - key interface{} - value func(context.Context, *Constraints) (interface{}, error) + key any + value func(context.Context, *Constraints) (any, error) opts []ConstraintsOpt async *asyncState } @@ -76,13 +77,13 @@ func (s State) ensurePlatform() State { return s } -func (s State) WithValue(k, v interface{}) State { - return s.withValue(k, func(context.Context, *Constraints) (interface{}, error) { +func (s State) WithValue(k, v any) State { + return s.withValue(k, func(context.Context, *Constraints) (any, error) { return v, nil }) } -func (s State) withValue(k interface{}, v func(context.Context, *Constraints) (interface{}, error)) State { +func (s State) withValue(k any, v func(context.Context, *Constraints) (any, error)) State { return State{ out: s.Output(), prev: &s, // doesn't need to be original pointer @@ -91,7 +92,7 @@ func (s State) withValue(k interface{}, v func(context.Context, *Constraints) (i } } -func (s State) Value(ctx context.Context, k interface{}, co ...ConstraintsOpt) (interface{}, error) { +func (s State) Value(ctx context.Context, k any, co ...ConstraintsOpt) (any, error) { c := &Constraints{} for _, f := range co { f.SetConstraintsOption(c) @@ -99,12 +100,12 @@ func (s State) Value(ctx context.Context, k interface{}, co ...ConstraintsOpt) ( return s.getValue(k)(ctx, c) } -func (s State) getValue(k interface{}) func(context.Context, *Constraints) (interface{}, error) { +func (s State) getValue(k any) func(context.Context, *Constraints) (any, error) { if s.key == k { return s.value } if s.async != nil { - return func(ctx context.Context, c *Constraints) (interface{}, error) { + return func(ctx context.Context, c *Constraints) (any, error) { target, err := s.async.Do(ctx, c) if err != nil { return nil, err @@ -271,7 +272,7 @@ func (s State) WithImageConfig(c []byte) (State, error) { OSVersion: img.OSVersion, } if img.OSFeatures != nil { - plat.OSFeatures = append([]string{}, img.OSFeatures...) + plat.OSFeatures = slices.Clone(img.OSFeatures) } s = s.Platform(plat) } @@ -321,7 +322,7 @@ func (s State) AddEnv(key, value string) State { } // AddEnvf is the same as [State.AddEnv] but with a format string. 
-func (s State) AddEnvf(key, value string, v ...interface{}) State { +func (s State) AddEnvf(key, value string, v ...any) State { return AddEnvf(key, value, v...)(s) } @@ -332,7 +333,7 @@ func (s State) Dir(str string) State { } // Dirf is the same as [State.Dir] but with a format string. -func (s State) Dirf(str string, v ...interface{}) State { +func (s State) Dirf(str string, v ...any) State { return Dirf(str, v...)(s) } @@ -608,7 +609,7 @@ func WithCustomName(name string) ConstraintsOpt { }) } -func WithCustomNamef(name string, a ...interface{}) ConstraintsOpt { +func WithCustomNamef(name string, a ...any) ConstraintsOpt { return WithCustomName(fmt.Sprintf(name, a...)) } @@ -746,6 +747,6 @@ func Require(filters ...string) ConstraintsOpt { }) } -func nilValue(context.Context, *Constraints) (interface{}, error) { +func nilValue(context.Context, *Constraints) (any, error) { return nil, nil } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/prune.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/prune.go index 1d6d02df0..0b4ea2706 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/prune.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/prune.go @@ -18,9 +18,9 @@ func (c *Client) Prune(ctx context.Context, ch chan UsageInfo, opts ...PruneOpti req := &controlapi.PruneRequest{ Filter: info.Filter, KeepDuration: int64(info.KeepDuration), - ReservedSpace: int64(info.ReservedSpace), - MaxUsedSpace: int64(info.MaxUsedSpace), - MinFreeSpace: int64(info.MinFreeSpace), + ReservedSpace: info.ReservedSpace, + MaxUsedSpace: info.MaxUsedSpace, + MinFreeSpace: info.MinFreeSpace, } if info.All { req.All = true @@ -33,7 +33,7 @@ func (c *Client) Prune(ctx context.Context, ch chan UsageInfo, opts ...PruneOpti for { d, err := cl.Recv() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil } return err diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/solve.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/solve.go index 57ee82d05..603211261 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/solve.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/client/solve.go @@ -35,6 +35,7 @@ import ( type SolveOpt struct { Exports []ExportEntry + EnableSessionExporter bool LocalDirs map[string]string // Deprecated: use LocalMounts LocalMounts map[string]fsutil.FS OCIStores map[string]content.Store @@ -142,9 +143,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG } contentStores := map[string]content.Store{} - for key, store := range cacheOpt.contentStores { - contentStores[key] = store - } + maps.Copy(contentStores, cacheOpt.contentStores) for key, store := range opt.OCIStores { key2 := "oci:" + key if _, ok := contentStores[key2]; ok { @@ -272,6 +271,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG Exporters: exports, ExporterDeprecated: exportDeprecated, ExporterAttrsDeprecated: exportAttrDeprecated, + EnableSessionExporter: opt.EnableSessionExporter, Session: s.ID(), Frontend: opt.Frontend, FrontendAttrs: frontendAttrs, @@ -322,7 +322,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG for { resp, err := stream.Recv() if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil } return errors.Wrap(err, "failed to receive status") @@ -356,12 +356,12 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG return nil, err } var 
manifestDesc ocispecs.Descriptor - if err = json.Unmarshal([]byte(manifestDescDt), &manifestDesc); err != nil { + if err = json.Unmarshal(manifestDescDt, &manifestDesc); err != nil { return nil, err } for _, storePath := range storesToUpdate { names := []ociindex.NameOrTag{ociindex.Tag("latest")} - if t, ok := res.ExporterResponse["image.name"]; ok { + if t, ok := res.ExporterResponse[exptypes.ExporterImageNameKey]; ok { inp := strings.Split(t, ",") names = make([]ociindex.NameOrTag, len(inp)) for i, n := range inp { @@ -402,8 +402,7 @@ func prepareSyncedFiles(def *llb.Definition, localMounts map[string]fsutil.FS) ( return nil, errors.Wrap(err, "failed to parse llb proto op") } if src := op.GetSource(); src != nil { - if strings.HasPrefix(src.Identifier, "local://") { - name := strings.TrimPrefix(src.Identifier, "local://") + if name, ok := strings.CutPrefix(src.Identifier, "local://"); ok { mount, ok := localMounts[name] if !ok { return nil, errors.Errorf("local directory %s not enabled", name) @@ -538,9 +537,7 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach func prepareMounts(opt *SolveOpt) (map[string]fsutil.FS, error) { // merge local mounts and fallback local directories together mounts := make(map[string]fsutil.FS) - for k, mount := range opt.LocalMounts { - mounts[k] = mount - } + maps.Copy(mounts, opt.LocalMounts) for k, dir := range opt.LocalDirs { mount, err := fsutil.NewFS(dir) if err != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/cmd/buildctl/build/ssh.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/cmd/buildctl/build/ssh.go index 05e71712d..e0ddc0f59 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/cmd/buildctl/build/ssh.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/cmd/buildctl/build/ssh.go @@ -1,21 +1,40 @@ package build import ( + "strconv" "strings" "github.com/moby/buildkit/session/sshforward/sshprovider" + "github.com/pkg/errors" ) // ParseSSH parses --ssh func ParseSSH(inp []string) ([]sshprovider.AgentConfig, error) { configs := make([]sshprovider.AgentConfig, 0, len(inp)) for _, v := range inp { - parts := strings.SplitN(v, "=", 2) + key, val, ok := strings.Cut(v, "=") cfg := sshprovider.AgentConfig{ - ID: parts[0], + ID: key, } - if len(parts) > 1 { - cfg.Paths = strings.Split(parts[1], ",") + if ok { + paths := strings.Split(val, ",") + cfg.Paths = make([]string, 0, len(paths)) + + for _, p := range paths { + key, val, ok := strings.Cut(p, "=") + if ok && key == "raw" { + b, err := strconv.ParseBool(val) + if err != nil { + return nil, errors.Wrapf(err, "invalid value for 'raw': %s", val) + } + cfg.Raw = b + } else { + cfg.Paths = append(cfg.Paths, p) + } + } + } + if cfg.Raw && len(cfg.Paths) != 1 { + return nil, errors.Errorf("raw mode must supply exactly one socket path for %q", cfg.ID) } configs = append(configs, cfg) } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/errdefs/internal.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/errdefs/internal.go index 24cea7e33..bf03d0f99 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/errdefs/internal.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/errdefs/internal.go @@ -5,13 +5,13 @@ import ( "syscall" ) -type internalErr struct { +type internalError struct { error } -func (internalErr) System() {} +func (internalError) System() {} -func (err internalErr) Unwrap() error { +func (err internalError) Unwrap() error { return err.error } @@ -19,13 +19,13 @@ type system interface { System() } 
-var _ system = internalErr{} +var _ system = internalError{} func Internal(err error) error { if err == nil { return nil } - return internalErr{err} + return internalError{err} } func IsInternal(err error) bool { diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/executor/resources/types/systypes.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/executor/resources/types/systypes.go new file mode 100644 index 000000000..56db46945 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/executor/resources/types/systypes.go @@ -0,0 +1,72 @@ +package types + +import ( + "encoding/json" + "math" + "time" +) + +type SysCPUStat struct { + User float64 `json:"user"` + Nice float64 `json:"nice"` + System float64 `json:"system"` + Idle float64 `json:"idle"` + Iowait float64 `json:"iowait"` + IRQ float64 `json:"irq"` + SoftIRQ float64 `json:"softirq"` + Steal float64 `json:"steal"` + Guest float64 `json:"guest"` + GuestNice float64 `json:"guestNice"` +} + +type sysCPUStatAlias SysCPUStat // avoid recursion of MarshalJSON + +func (s SysCPUStat) MarshalJSON() ([]byte, error) { + return json.Marshal(sysCPUStatAlias{ + User: math.Round(s.User*1000) / 1000, + Nice: math.Round(s.Nice*1000) / 1000, + System: math.Round(s.System*1000) / 1000, + Idle: math.Round(s.Idle*1000) / 1000, + Iowait: math.Round(s.Iowait*1000) / 1000, + IRQ: math.Round(s.IRQ*1000) / 1000, + SoftIRQ: math.Round(s.SoftIRQ*1000) / 1000, + Steal: math.Round(s.Steal*1000) / 1000, + Guest: math.Round(s.Guest*1000) / 1000, + GuestNice: math.Round(s.GuestNice*1000) / 1000, + }) +} + +type ProcStat struct { + ContextSwitches uint64 `json:"contextSwitches"` + ProcessCreated uint64 `json:"processCreated"` + ProcessesRunning uint64 `json:"processesRunning"` +} + +type SysMemoryStat struct { + Total *uint64 `json:"total"` + Free *uint64 `json:"free"` + Available *uint64 `json:"available"` + Buffers *uint64 `json:"buffers"` + Cached *uint64 `json:"cached"` + Active *uint64 `json:"active"` + Inactive *uint64 `json:"inactive"` + Swap *uint64 `json:"swap"` + Dirty *uint64 `json:"dirty"` + Writeback *uint64 `json:"writeback"` + Slab *uint64 `json:"slab"` +} + +type SysSample struct { + //nolint + Timestamp_ time.Time `json:"timestamp"` + CPUStat *SysCPUStat `json:"cpuStat,omitempty"` + ProcStat *ProcStat `json:"procStat,omitempty"` + MemoryStat *SysMemoryStat `json:"memoryStat,omitempty"` + CPUPressure *Pressure `json:"cpuPressure,omitempty"` + MemoryPressure *Pressure `json:"memoryPressure,omitempty"` + IOPressure *Pressure `json:"ioPressure,omitempty"` +} + +func (s *SysSample) Timestamp() time.Time { + return s.Timestamp_ +} diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/executor/resources/types/types.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/executor/resources/types/types.go new file mode 100644 index 000000000..536b99230 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/executor/resources/types/types.go @@ -0,0 +1,113 @@ +package types + +import ( + "context" + "time" +) + +type Recorder interface { + Start() + Close() + CloseAsync(func(context.Context) error) error + Wait() error + Samples() (*Samples, error) +} + +type Samples struct { + Samples []*Sample `json:"samples,omitempty"` + SysCPUStat *SysCPUStat `json:"sysCPUStat,omitempty"` +} + +// Sample represents a wrapper for sampled data of cgroupv2 controllers +type Sample struct { + //nolint + Timestamp_ time.Time `json:"timestamp"` + CPUStat *CPUStat `json:"cpuStat,omitempty"` + MemoryStat *MemoryStat 
`json:"memoryStat,omitempty"` + IOStat *IOStat `json:"ioStat,omitempty"` + PIDsStat *PIDsStat `json:"pidsStat,omitempty"` + NetStat *NetworkSample `json:"netStat,omitempty"` +} + +func (s *Sample) Timestamp() time.Time { + return s.Timestamp_ +} + +type NetworkSample struct { + RxBytes int64 `json:"rxBytes,omitempty"` + RxPackets int64 `json:"rxPackets,omitempty"` + RxErrors int64 `json:"rxErrors,omitempty"` + RxDropped int64 `json:"rxDropped,omitempty"` + TxBytes int64 `json:"txBytes,omitempty"` + TxPackets int64 `json:"txPackets,omitempty"` + TxErrors int64 `json:"txErrors,omitempty"` + TxDropped int64 `json:"txDropped,omitempty"` +} + +// CPUStat represents the sampling state of the cgroupv2 CPU controller +type CPUStat struct { + UsageNanos *uint64 `json:"usageNanos,omitempty"` + UserNanos *uint64 `json:"userNanos,omitempty"` + SystemNanos *uint64 `json:"systemNanos,omitempty"` + NrPeriods *uint32 `json:"nrPeriods,omitempty"` + NrThrottled *uint32 `json:"nrThrottled,omitempty"` + ThrottledNanos *uint64 `json:"throttledNanos,omitempty"` + Pressure *Pressure `json:"pressure,omitempty"` +} + +// MemoryStat represents the sampling state of the cgroupv2 memory controller +type MemoryStat struct { + SwapBytes *uint64 `json:"swapBytes,omitempty"` + Anon *uint64 `json:"anon,omitempty"` + File *uint64 `json:"file,omitempty"` + Kernel *uint64 `json:"kernel,omitempty"` + KernelStack *uint64 `json:"kernelStack,omitempty"` + PageTables *uint64 `json:"pageTables,omitempty"` + Sock *uint64 `json:"sock,omitempty"` + Vmalloc *uint64 `json:"vmalloc,omitempty"` + Shmem *uint64 `json:"shmem,omitempty"` + FileMapped *uint64 `json:"fileMapped,omitempty"` + FileDirty *uint64 `json:"fileDirty,omitempty"` + FileWriteback *uint64 `json:"fileWriteback,omitempty"` + Slab *uint64 `json:"slab,omitempty"` + Pgscan *uint64 `json:"pgscan,omitempty"` + Pgsteal *uint64 `json:"pgsteal,omitempty"` + Pgfault *uint64 `json:"pgfault,omitempty"` + Pgmajfault *uint64 `json:"pgmajfault,omitempty"` + Peak *uint64 `json:"peak,omitempty"` + LowEvents uint64 `json:"lowEvents,omitempty"` + HighEvents uint64 `json:"highEvents,omitempty"` + MaxEvents uint64 `json:"maxEvents,omitempty"` + OomEvents uint64 `json:"oomEvents,omitempty"` + OomKillEvents uint64 `json:"oomKillEvents,omitempty"` + Pressure *Pressure `json:"pressure,omitempty"` +} + +// IOStat represents the sampling state of the cgroupv2 IO controller +type IOStat struct { + ReadBytes *uint64 `json:"readBytes,omitempty"` + WriteBytes *uint64 `json:"writeBytes,omitempty"` + DiscardBytes *uint64 `json:"discardBytes,omitempty"` + ReadIOs *uint64 `json:"readIOs,omitempty"` + WriteIOs *uint64 `json:"writeIOs,omitempty"` + DiscardIOs *uint64 `json:"discardIOs,omitempty"` + Pressure *Pressure `json:"pressure,omitempty"` +} + +// PIDsStat represents the sampling state of the cgroupv2 PIDs controller +type PIDsStat struct { + Current *uint64 `json:"current,omitempty"` +} + +// Pressure represents the sampling state of pressure files +type Pressure struct { + Some *PressureValues `json:"some"` + Full *PressureValues `json:"full"` +} + +type PressureValues struct { + Avg10 *float64 `json:"avg10"` + Avg60 *float64 `json:"avg60"` + Avg300 *float64 `json:"avg300"` + Total *uint64 `json:"total"` +} diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/keys.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/keys.go index 8eb2064e1..7cfa8dc02 100644 --- 
a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/keys.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/keys.go @@ -26,11 +26,17 @@ var ( // Value: bool OptKeyUnpack ImageExporterOptKey = "unpack" - // Fallback image name prefix if image name isn't provided. - // If used, image will be named as <dangling-name-prefix>@<digest> + // Image name prefix to be used for tagging a dangling image. + // If used, image will be named as <dangling-name-prefix>@<digest> in addition + // to any other specified names. // Value: string OptKeyDanglingPrefix ImageExporterOptKey = "dangling-name-prefix" + // Only use the dangling image name as a fallback if image name isn't provided. + // Ignored if dangling-name-prefix is not set. + // Value: bool + OptKeyDanglingEmptyOnly ImageExporterOptKey = "danging-name-empty-only" + // Creates additional image name with format <name>@<digest> // Value: bool OptKeyNameCanonical ImageExporterOptKey = "name-canonical" diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/parse.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/parse.go index cd2256b56..4d2b02f5d 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/parse.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/parse.go @@ -32,7 +32,7 @@ func ParsePlatforms(meta map[string][]byte) (Platforms, error) { return ps, nil } - p := platforms.DefaultSpec() + var p ocispecs.Platform if imgConfig, ok := meta[ExporterImageConfigKey]; ok { var img ocispecs.Image err := json.Unmarshal(imgConfig, &img) @@ -51,6 +51,8 @@ func ParsePlatforms(meta map[string][]byte) (Platforms, error) { } else if img.OS != "" || img.Architecture != "" { return Platforms{}, errors.Errorf("invalid image config: os and architecture must be specified together") } + } else { + p = platforms.DefaultSpec() } p = platforms.Normalize(p) pk := platforms.FormatAll(p) diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go index 056485b66..d7cc98b94 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go @@ -9,6 +9,7 @@ import ( const ( ExporterConfigDigestKey = "config.digest" + ExporterImageNameKey = "image.name" ExporterImageDigestKey = "containerimage.digest" ExporterImageConfigKey = "containerimage.config" ExporterImageConfigDigestKey = "containerimage.config.digest" diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/attestations/parse.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/attestations/parse.go index a3d5ff5db..6a2b6e181 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/attestations/parse.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/attestations/parse.go @@ -3,6 +3,7 @@ package attestations import ( "strings" + provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types" "github.com/pkg/errors" "github.com/tonistiigi/go-csvvalue" ) @@ -14,6 +15,7 @@ const ( defaultSBOMGenerator = "docker/buildkit-syft-scanner:stable-1" + defaultSLSAVersion = string(provenancetypes.ProvenanceSLSA02) ) func Filter(v map[string]string) map[string]string { @@ -43,12 +45,12 @@ func Validate(values map[string]map[string]string) 
(map[string]map[string]string func Parse(values map[string]string) (map[string]map[string]string, error) { attests := make(map[string]string) for k, v := range values { - if strings.HasPrefix(k, "attest:") { - attests[strings.ToLower(strings.TrimPrefix(k, "attest:"))] = v + if after, ok := strings.CutPrefix(k, "attest:"); ok { + attests[strings.ToLower(after)] = v continue } - if strings.HasPrefix(k, "build-arg:BUILDKIT_ATTEST_") { - attests[strings.ToLower(strings.TrimPrefix(k, "build-arg:BUILDKIT_ATTEST_"))] = v + if after, ok := strings.CutPrefix(k, "build-arg:BUILDKIT_ATTEST_"); ok { + attests[strings.ToLower(after)] = v continue } } @@ -57,8 +59,11 @@ func Parse(values map[string]string) (map[string]map[string]string, error) { for k, v := range attests { attrs := make(map[string]string) out[k] = attrs - if k == KeyTypeSbom { + switch k { + case KeyTypeSbom: attrs["generator"] = defaultSBOMGenerator + case KeyTypeProvenance: + attrs["version"] = defaultSLSAVersion } if v == "" { continue diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go index b194a97ec..f82f07f9d 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go @@ -246,6 +246,7 @@ type AddCommand struct { ExcludePatterns []string KeepGitDir bool // whether to keep .git dir, only meaningful for git sources Checksum string + Unpack *bool } func (c *AddCommand) Expand(expander SingleWordExpander) error { @@ -551,16 +552,16 @@ func HasStage(s []Stage, name string) (int, bool) { } type withExternalData struct { - m map[interface{}]interface{} + m map[any]any } -func (c *withExternalData) getExternalValue(k interface{}) interface{} { +func (c *withExternalData) getExternalValue(k any) any { return c.m[k] } -func (c *withExternalData) setExternalValue(k, v interface{}) { +func (c *withExternalData) setExternalValue(k, v any) { if c.m == nil { - c.m = map[interface{}]interface{}{} + c.m = map[any]any{} } c.m[k] = v } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go index 49c8817cb..ccb0820fa 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go @@ -7,7 +7,7 @@ package instructions import ( "fmt" "regexp" - "sort" + "slices" "strconv" "strings" "time" @@ -66,12 +66,12 @@ func newParseRequestFromNode(node *parser.Node) parseRequest { } } -func ParseInstruction(node *parser.Node) (v interface{}, err error) { +func ParseInstruction(node *parser.Node) (v any, err error) { return ParseInstructionWithLinter(node, nil) } // ParseInstruction converts an AST to a typed instruction (either a command or a build stage beginning when encountering a `FROM` statement) -func ParseInstructionWithLinter(node *parser.Node, lint *linter.Linter) (v interface{}, err error) { +func ParseInstructionWithLinter(node *parser.Node, lint *linter.Linter) (v any, err error) { defer func() { if err != nil { err = parser.WithLocation(err, node.Location()) @@ -338,6 +338,7 @@ func parseAdd(req parseRequest) (*AddCommand, error) { flLink := req.flags.AddBool("link", false) 
flKeepGitDir := req.flags.AddBool("keep-git-dir", false) flChecksum := req.flags.AddString("checksum", "") + flUnpack := req.flags.AddBool("unpack", false) if err := req.flags.Parse(); err != nil { return nil, err } @@ -347,6 +348,12 @@ func parseAdd(req parseRequest) (*AddCommand, error) { return nil, err } + var unpack *bool + if _, ok := req.flags.used["unpack"]; ok { + b := flUnpack.Value == "true" + unpack = &b + } + return &AddCommand{ withNameAndCode: newWithNameAndCode(req), SourcesAndDest: *sourcesAndDest, @@ -356,6 +363,7 @@ func parseAdd(req parseRequest) (*AddCommand, error) { KeepGitDir: flKeepGitDir.Value == "true", Checksum: flChecksum.Value, ExcludePatterns: stringValuesFromFlagIfPossible(flExcludes), + Unpack: unpack, }, nil } @@ -687,7 +695,7 @@ func parseExpose(req parseRequest) (*ExposeCommand, error) { return nil, err } - sort.Strings(portsTab) + slices.Sort(portsTab) return &ExposeCommand{ Ports: portsTab, withNameAndCode: newWithNameAndCode(req), @@ -831,8 +839,8 @@ func getComment(comments []string, name string) string { return "" } for _, line := range comments { - if strings.HasPrefix(line, name+" ") { - return strings.TrimPrefix(line, name+" ") + if after, ok := strings.CutPrefix(line, name+" "); ok { + return after } } return "" @@ -880,10 +888,8 @@ func validateDefinitionDescription(instruction string, argKeys []string, descCom return } descCommentParts := strings.Split(descComments[len(descComments)-1], " ") - for _, key := range argKeys { - if key == descCommentParts[0] { - return - } + if slices.Contains(argKeys, descCommentParts[0]) { + return } exampleKey := argKeys[0] if len(argKeys) > 1 { diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/linter/linter.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/linter/linter.go index 890acc53b..405f6923d 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/linter/linter.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/linter/linter.go @@ -55,7 +55,7 @@ func (lc *Linter) Run(rule LinterRuleI, location []parser.Range, txt ...string) rulename := rule.RuleName() if rule.IsExperimental() { _, experimentalOk := lc.ExperimentalRules[rulename] - if !(lc.ExperimentalAll || experimentalOk) { + if !lc.ExperimentalAll && !experimentalOk { return } } else { diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go index 4f1e1300e..b757e7548 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go @@ -148,14 +148,16 @@ func parseDirective(key string, dt []byte, anyFormat bool) (string, string, []Ra } // use json directive, and search for { "key": "..." 
} - jsonDirective := map[string]string{} + jsonDirective := map[string]any{} if err := json.Unmarshal(dt, &jsonDirective); err == nil { - if v, ok := jsonDirective[key]; ok { - loc := []Range{{ - Start: Position{Line: line}, - End: Position{Line: line}, - }} - return v, v, loc, true + if vAny, ok := jsonDirective[key]; ok { + if v, ok := vAny.(string); ok { + loc := []Range{{ + Start: Position{Line: line}, + End: Position{Line: line}, + }} + return v, v, loc, true + } } } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/errors.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/errors.go index 08a40c5d4..b3bcf2ae8 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/errors.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/errors.go @@ -5,14 +5,14 @@ import ( "github.com/pkg/errors" ) -// ErrorLocation gives a location in source code that caused the error -type ErrorLocation struct { +// LocationError gives a location in source code that caused the error +type LocationError struct { Locations [][]Range error } // Unwrap unwraps to the next error -func (e *ErrorLocation) Unwrap() error { +func (e *LocationError) Unwrap() error { return e.error } @@ -45,7 +45,7 @@ func setLocation(err error, location []Range, add bool) error { if err == nil { return nil } - var el *ErrorLocation + var el *LocationError if errors.As(err, &el) { if add { el.Locations = append(el.Locations, location) @@ -54,7 +54,7 @@ func setLocation(err error, location []Range, add bool) error { } return err } - return stack.Enable(&ErrorLocation{ + return stack.Enable(&LocationError{ error: err, Locations: [][]Range{location}, }) diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go index f8d891c71..eb86e73b4 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go @@ -281,7 +281,7 @@ func parseJSON(rest string) (*Node, map[string]bool, error) { return nil, nil, errDockerfileNotJSONArray } - var myJSON []interface{} + var myJSON []any if err := json.Unmarshal([]byte(rest), &myJSON); err != nil { return nil, nil, err } @@ -318,7 +318,7 @@ func parseMaybeJSON(rest string, d *directives) (*Node, map[string]bool, error) if err == nil { return node, attrs, nil } - if err == errDockerfileNotStringArray { + if errors.Is(err, errDockerfileNotStringArray) { return nil, nil, err } @@ -336,7 +336,7 @@ func parseMaybeJSONToList(rest string, d *directives) (*Node, map[string]bool, e if err == nil { return node, attrs, nil } - if err == errDockerfileNotStringArray { + if errors.Is(err, errDockerfileNotStringArray) { return nil, nil, err } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go index 1b0a96228..2f0da0222 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go @@ -114,7 +114,7 @@ type Heredoc struct { var ( dispatch map[string]func(string, *directives) (*Node, map[string]bool, error) reWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) - 
reHeredoc = regexp.MustCompile(`^(\d*)<<(-?)([^<]*)$`) + reHeredoc = regexp.MustCompile(`^(\d*)<<(-?)\s*([^<]*)$`) reLeadingTabs = regexp.MustCompile(`(?m)^\t+`) ) @@ -220,7 +220,7 @@ func init() { // based on the command and command arguments. A Node is created from the // result of the dispatch. func newNodeFromLine(line string, d *directives, comments []string) (*Node, error) { - cmd, flags, args, err := splitCommand(line) + cmd, flags, args, err := splitCommand(line, d) if err != nil { return nil, err } @@ -556,8 +556,8 @@ func scanLines(data []byte, atEOF bool) (advance int, token []byte, err error) { } func handleScannerError(err error) error { - switch err { - case bufio.ErrTooLong: + switch { + case errors.Is(err, bufio.ErrTooLong): return errors.Errorf("dockerfile line greater than max allowed size of %d", bufio.MaxScanTokenSize-1) default: return err diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go index d1c87522e..87b73f24f 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go @@ -7,7 +7,7 @@ import ( // splitCommand takes a single line of text and parses out the cmd and args, // which are used for dispatching to more exact parsing functions. -func splitCommand(line string) (string, []string, string, error) { +func splitCommand(line string, d *directives) (string, []string, string, error) { var args string var flags []string @@ -16,7 +16,7 @@ func splitCommand(line string) (string, []string, string, error) { if len(cmdline) == 2 { var err error - args, flags, err = extractBuilderFlags(cmdline[1]) + args, flags, err = extractBuilderFlags(cmdline[1], d) if err != nil { return "", nil, "", err } @@ -25,7 +25,7 @@ func splitCommand(line string) (string, []string, string, error) { return cmdline[0], flags, strings.TrimSpace(args), nil } -func extractBuilderFlags(line string) (string, []string, error) { +func extractBuilderFlags(line string, d *directives) (string, []string, error) { // Parses the BuilderFlags and returns the remaining part of the line const ( @@ -87,7 +87,7 @@ func extractBuilderFlags(line string) (string, []string, error) { phase = inQuote continue } - if ch == '\\' { + if ch == d.escapeToken { if pos+1 == len(line) { continue // just skip \ at end } @@ -104,7 +104,7 @@ func extractBuilderFlags(line string) (string, []string, error) { phase = inWord continue } - if ch == '\\' { + if ch == d.escapeToken { if pos+1 == len(line) { phase = inWord continue // just skip \ at end diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go index 18da29334..b8a307a06 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go @@ -177,6 +177,7 @@ func (sw *shellWord) processStopOn(stopChar rune, rawEscapes bool) (string, []st // no need to initialize all the time var charFuncMapping = map[rune]func() (string, error){ '$': sw.processDollar, + '<': sw.processPossibleHeredoc, } if !sw.SkipProcessQuotes { charFuncMapping['\''] = sw.processSingleQuote @@ -512,6 +513,25 @@ func (sw *shellWord) processName() string { return name.String() } +func 
(sw *shellWord) processPossibleHeredoc() (string, error) { + sw.scanner.Next() + if sw.scanner.Peek() != '<' { + return "<", nil // not a heredoc + } + sw.scanner.Next() + + // heredoc might have whitespace between << and word terminator + var space bytes.Buffer + nextCh := sw.scanner.Peek() + for isWhitespace(nextCh) { + space.WriteRune(nextCh) + sw.scanner.Next() + nextCh = sw.scanner.Peek() + } + result := "<<" + space.String() + return result, nil +} + // isSpecialParam checks if the provided character is a special parameters, // as defined in http://pubs.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_05_02 func isSpecialParam(char rune) bool { @@ -677,3 +697,11 @@ func trimSuffix(pattern, word string, greedy bool) (string, error) { } return reverseString(str), nil } + +func isWhitespace(r rune) bool { + switch r { + case '\t', '\r', ' ': + return true + } + return false +} diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerui/attr.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerui/attr.go index 259100b46..e06d70e20 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerui/attr.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerui/attr.go @@ -128,8 +128,8 @@ func parseSourceDateEpoch(v string) (*time.Time, error) { func parseLocalSessionIDs(opt map[string]string) map[string]string { m := map[string]string{} for k, v := range opt { - if strings.HasPrefix(k, localSessionIDPrefix) { - m[strings.TrimPrefix(k, localSessionIDPrefix)] = v + if after, ok := strings.CutPrefix(k, localSessionIDPrefix); ok { + m[after] = v } } return m @@ -138,8 +138,8 @@ func parseLocalSessionIDs(opt map[string]string) map[string]string { func filter(opt map[string]string, key string) map[string]string { m := map[string]string{} for k, v := range opt { - if strings.HasPrefix(k, key) { - m[strings.TrimPrefix(k, key)] = v + if after, ok := strings.CutPrefix(k, key); ok { + m[after] = v } } return m diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerui/build.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerui/build.go index cc93dfaf3..233329b61 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerui/build.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerui/build.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "slices" "github.com/containerd/platforms" "github.com/moby/buildkit/exporter/containerimage/exptypes" @@ -21,7 +22,6 @@ func (bc *Client) Build(ctx context.Context, fn BuildFunc) (*ResultBuilder, erro targets := make([]*ocispecs.Platform, 0, len(bc.TargetPlatforms)) for _, p := range bc.TargetPlatforms { - p := p targets = append(targets, &p) } if len(targets) == 0 { @@ -54,29 +54,18 @@ func (bc *Client) Build(ctx context.Context, fn BuildFunc) (*ResultBuilder, erro } } - p := platforms.DefaultSpec() + var p ocispecs.Platform if tp != nil { p = *tp + } else { + p = platforms.DefaultSpec() } - - // in certain conditions we allow input platform to be extended from base image - if p.OS == "windows" && img.OS == p.OS { - if p.OSVersion == "" && img.OSVersion != "" { - p.OSVersion = img.OSVersion - } - if p.OSFeatures == nil && len(img.OSFeatures) > 0 { - p.OSFeatures = append([]string{}, img.OSFeatures...) 
- } - } - - p = platforms.Normalize(p) - k := platforms.FormatAll(p) - + expPlat := makeExportPlatform(p, img.Platform) if bc.MultiPlatformRequested { - res.AddRef(k, ref) - res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, k), config) + res.AddRef(expPlat.ID, ref) + res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, expPlat.ID), config) if len(baseConfig) > 0 { - res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageBaseConfigKey, k), baseConfig) + res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageBaseConfigKey, expPlat.ID), baseConfig) } } else { res.SetRef(ref) @@ -85,10 +74,7 @@ func (bc *Client) Build(ctx context.Context, fn BuildFunc) (*ResultBuilder, erro res.AddMeta(exptypes.ExporterImageBaseConfigKey, baseConfig) } } - expPlatforms.Platforms[i] = exptypes.Platform{ - ID: k, - Platform: p, - } + expPlatforms.Platforms[i] = expPlat return nil }) } @@ -119,10 +105,35 @@ func (rb *ResultBuilder) Finalize() (*client.Result, error) { func (rb *ResultBuilder) EachPlatform(ctx context.Context, fn func(ctx context.Context, id string, p ocispecs.Platform) error) error { eg, ctx := errgroup.WithContext(ctx) for _, p := range rb.expPlatforms.Platforms { - p := p eg.Go(func() error { return fn(ctx, p.ID, p.Platform) }) } return eg.Wait() } + +func extendWindowsPlatform(p, imgP ocispecs.Platform) ocispecs.Platform { + // in certain conditions we allow input platform to be extended from base image + if p.OS == "windows" && imgP.OS == p.OS { + if p.OSVersion == "" && imgP.OSVersion != "" { + p.OSVersion = imgP.OSVersion + } + if p.OSFeatures == nil && len(imgP.OSFeatures) > 0 { + p.OSFeatures = slices.Clone(imgP.OSFeatures) + } + } + return p +} + +func makeExportPlatform(p, imgP ocispecs.Platform) exptypes.Platform { + p = platforms.Normalize(p) + exp := exptypes.Platform{ + ID: platforms.FormatAll(p), + } + if p.OS == "windows" { + p = extendWindowsPlatform(p, imgP) + p = platforms.Normalize(p) + } + exp.Platform = p + return exp +} diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerui/config.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerui/config.go index ee8a26675..6c3f9f5f3 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerui/config.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/dockerui/config.go @@ -7,6 +7,7 @@ import ( "path" "strconv" "strings" + "sync" "time" "github.com/containerd/platforms" @@ -85,6 +86,7 @@ type Client struct { localsSessionIDs map[string]string dockerignore []byte + dockerignoreMu sync.Mutex dockerignoreName string } @@ -148,9 +150,11 @@ func (bc *Client) BuildOpts() client.BuildOpts { func (bc *Client) init() error { opts := bc.bopts.Opts - defaultBuildPlatform := platforms.Normalize(platforms.DefaultSpec()) + var defaultBuildPlatform ocispecs.Platform if workers := bc.bopts.Workers; len(workers) > 0 && len(workers[0].Platforms) > 0 { defaultBuildPlatform = workers[0].Platforms[0] + } else { + defaultBuildPlatform = platforms.Normalize(platforms.DefaultSpec()) } buildPlatforms := []ocispecs.Platform{defaultBuildPlatform} targetPlatforms := []ocispecs.Platform{} @@ -459,9 +463,11 @@ func (bc *Client) NamedContext(name string, opt ContextOpt) (*NamedContext, erro } name = strings.TrimSuffix(reference.FamiliarString(named), ":latest") - pp := platforms.DefaultSpec() + var pp ocispecs.Platform if opt.Platform != nil { pp = *opt.Platform + } else { + pp = platforms.DefaultSpec() } pname := name + "::" + 
platforms.FormatAll(platforms.Normalize(pp)) nc, err := bc.namedContext(name, pname, opt) @@ -512,6 +518,8 @@ func WithInternalName(name string) llb.ConstraintsOpt { } func (bc *Client) dockerIgnorePatterns(ctx context.Context, bctx *buildContext) ([]string, error) { + bc.dockerignoreMu.Lock() + defer bc.dockerignoreMu.Unlock() if bc.dockerignore == nil { sessionID := bc.bopts.SessionID if v, ok := bc.localsSessionIDs[bctx.contextLocalName]; ok { diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go index 615700f51..93fee1262 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go @@ -44,7 +44,7 @@ type GrpcClient interface { func New(ctx context.Context, opts map[string]string, session, product string, c pb.LLBBridgeClient, w []client.WorkerInfo) (GrpcClient, error) { pingCtx, pingCancel := context.WithCancelCause(ctx) - pingCtx, _ = context.WithTimeoutCause(pingCtx, 15*time.Second, errors.WithStack(context.DeadlineExceeded)) + pingCtx, _ = context.WithTimeoutCause(pingCtx, 15*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet defer pingCancel(errors.WithStack(context.Canceled)) resp, err := c.Ping(pingCtx, &pb.PingRequest{}) if err != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go index 006aeee2e..7e43259e2 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go @@ -1,4 +1,4 @@ -package moby_buildkit_v1_frontend //nolint:revive +package moby_buildkit_v1_frontend //nolint:revive,staticcheck import "github.com/moby/buildkit/util/apicaps" diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/gateway/pb/exit.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/gateway/pb/exit.go index d978bfa66..57007185c 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/gateway/pb/exit.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/gateway/pb/exit.go @@ -1,4 +1,4 @@ -package moby_buildkit_v1_frontend //nolint:revive +package moby_buildkit_v1_frontend //nolint:revive,staticcheck import ( "fmt" diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go index 089043910..e2252340d 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.11.4 // source: github.com/moby/buildkit/frontend/gateway/pb/gateway.proto @@ -17,6 +17,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -119,20 +120,19 @@ func (InTotoSubjectKind) EnumDescriptor() ([]byte, []int) { } type Result struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Result: + state protoimpl.MessageState `protogen:"open.v1"` + // Types that are valid to be assigned to Result: // // *Result_RefDeprecated // *Result_RefsDeprecated // *Result_Ref // *Result_Refs Result isResult_Result `protobuf_oneof:"result"` - Metadata map[string][]byte `protobuf:"bytes,10,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Metadata map[string][]byte `protobuf:"bytes,10,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // 11 was used during development and is reserved for old attestation format - Attestations map[string]*Attestations `protobuf:"bytes,12,rep,name=attestations,proto3" json:"attestations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Attestations map[string]*Attestations `protobuf:"bytes,12,rep,name=attestations,proto3" json:"attestations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Result) Reset() { @@ -165,37 +165,45 @@ func (*Result) Descriptor() ([]byte, []int) { return file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDescGZIP(), []int{0} } -func (m *Result) GetResult() isResult_Result { - if m != nil { - return m.Result +func (x *Result) GetResult() isResult_Result { + if x != nil { + return x.Result } return nil } func (x *Result) GetRefDeprecated() string { - if x, ok := x.GetResult().(*Result_RefDeprecated); ok { - return x.RefDeprecated + if x != nil { + if x, ok := x.Result.(*Result_RefDeprecated); ok { + return x.RefDeprecated + } } return "" } func (x *Result) GetRefsDeprecated() *RefMapDeprecated { - if x, ok := x.GetResult().(*Result_RefsDeprecated); ok { - return x.RefsDeprecated + if x != nil { + if x, ok := x.Result.(*Result_RefsDeprecated); ok { + return x.RefsDeprecated + } } return nil } func (x *Result) GetRef() *Ref { - if x, ok := x.GetResult().(*Result_Ref); ok { - return x.Ref + if x != nil { + if x, ok := x.Result.(*Result_Ref); ok { + return x.Ref + } } return nil } func (x *Result) GetRefs() *RefMap { - if x, ok := x.GetResult().(*Result_Refs); ok { - return x.Refs + if x != nil { + if x, ok := x.Result.(*Result_Refs); ok { + return x.Refs + } } return nil } @@ -244,11 +252,10 @@ func (*Result_Ref) isResult_Result() {} func (*Result_Refs) isResult_Result() {} type RefMapDeprecated struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Refs map[string]string `protobuf:"bytes,1,rep,name=refs,proto3" json:"refs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields - - Refs map[string]string `protobuf:"bytes,1,rep,name=refs,proto3" json:"refs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" 
protobuf_val:"bytes,2,opt,name=value,proto3"` + sizeCache protoimpl.SizeCache } func (x *RefMapDeprecated) Reset() { @@ -289,12 +296,11 @@ func (x *RefMapDeprecated) GetRefs() map[string]string { } type Ref struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Def *pb.Definition `protobuf:"bytes,2,opt,name=def,proto3" json:"def,omitempty"` unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Def *pb.Definition `protobuf:"bytes,2,opt,name=def,proto3" json:"def,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Ref) Reset() { @@ -342,11 +348,10 @@ func (x *Ref) GetDef() *pb.Definition { } type RefMap struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Refs map[string]*Ref `protobuf:"bytes,1,rep,name=refs,proto3" json:"refs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields - - Refs map[string]*Ref `protobuf:"bytes,1,rep,name=refs,proto3" json:"refs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + sizeCache protoimpl.SizeCache } func (x *RefMap) Reset() { @@ -387,11 +392,10 @@ func (x *RefMap) GetRefs() map[string]*Ref { } type Attestations struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Attestation []*Attestation `protobuf:"bytes,1,rep,name=attestation,proto3" json:"attestation,omitempty"` unknownFields protoimpl.UnknownFields - - Attestation []*Attestation `protobuf:"bytes,1,rep,name=attestation,proto3" json:"attestation,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Attestations) Reset() { @@ -432,16 +436,15 @@ func (x *Attestations) GetAttestation() []*Attestation { } type Attestation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Kind AttestationKind `protobuf:"varint,1,opt,name=kind,proto3,enum=moby.buildkit.v1.frontend.AttestationKind" json:"kind,omitempty"` - Metadata map[string][]byte `protobuf:"bytes,2,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Ref *Ref `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"` - Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"` - InTotoPredicateType string `protobuf:"bytes,5,opt,name=inTotoPredicateType,proto3" json:"inTotoPredicateType,omitempty"` - InTotoSubjects []*InTotoSubject `protobuf:"bytes,6,rep,name=inTotoSubjects,proto3" json:"inTotoSubjects,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Kind AttestationKind `protobuf:"varint,1,opt,name=kind,proto3,enum=moby.buildkit.v1.frontend.AttestationKind" json:"kind,omitempty"` + Metadata map[string][]byte `protobuf:"bytes,2,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Ref *Ref `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"` + Path string `protobuf:"bytes,4,opt,name=path,proto3" json:"path,omitempty"` + InTotoPredicateType string `protobuf:"bytes,5,opt,name=inTotoPredicateType,proto3" json:"inTotoPredicateType,omitempty"` + InTotoSubjects []*InTotoSubject 
`protobuf:"bytes,6,rep,name=inTotoSubjects,proto3" json:"inTotoSubjects,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Attestation) Reset() { @@ -517,13 +520,12 @@ func (x *Attestation) GetInTotoSubjects() []*InTotoSubject { } type InTotoSubject struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Kind InTotoSubjectKind `protobuf:"varint,1,opt,name=kind,proto3,enum=moby.buildkit.v1.frontend.InTotoSubjectKind" json:"kind,omitempty"` + Digest []string `protobuf:"bytes,2,rep,name=digest,proto3" json:"digest,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` unknownFields protoimpl.UnknownFields - - Kind InTotoSubjectKind `protobuf:"varint,1,opt,name=kind,proto3,enum=moby.buildkit.v1.frontend.InTotoSubjectKind" json:"kind,omitempty"` - Digest []string `protobuf:"bytes,2,rep,name=digest,proto3" json:"digest,omitempty"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + sizeCache protoimpl.SizeCache } func (x *InTotoSubject) Reset() { @@ -578,12 +580,11 @@ func (x *InTotoSubject) GetName() string { } type ReturnRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Result *Result `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + Error *status.Status `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` unknownFields protoimpl.UnknownFields - - Result *Result `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` - Error *status.Status `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ReturnRequest) Reset() { @@ -631,9 +632,9 @@ func (x *ReturnRequest) GetError() *status.Status { } type ReturnResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ReturnResponse) Reset() { @@ -667,9 +668,9 @@ func (*ReturnResponse) Descriptor() ([]byte, []int) { } type InputsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *InputsRequest) Reset() { @@ -703,11 +704,10 @@ func (*InputsRequest) Descriptor() ([]byte, []int) { } type InputsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Definitions map[string]*pb.Definition `protobuf:"bytes,1,rep,name=Definitions,proto3" json:"Definitions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields - - Definitions map[string]*pb.Definition `protobuf:"bytes,1,rep,name=Definitions,proto3" json:"Definitions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + sizeCache protoimpl.SizeCache } func (x *InputsResponse) Reset() { @@ -748,18 +748,17 @@ func (x *InputsResponse) GetDefinitions() map[string]*pb.Definition { } type ResolveImageConfigRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` - Platform *pb.Platform 
`protobuf:"bytes,2,opt,name=Platform,proto3" json:"Platform,omitempty"` - ResolveMode string `protobuf:"bytes,3,opt,name=ResolveMode,proto3" json:"ResolveMode,omitempty"` - LogName string `protobuf:"bytes,4,opt,name=LogName,proto3" json:"LogName,omitempty"` - ResolverType int32 `protobuf:"varint,5,opt,name=ResolverType,proto3" json:"ResolverType,omitempty"` - SessionID string `protobuf:"bytes,6,opt,name=SessionID,proto3" json:"SessionID,omitempty"` - StoreID string `protobuf:"bytes,7,opt,name=StoreID,proto3" json:"StoreID,omitempty"` - SourcePolicies []*pb1.Policy `protobuf:"bytes,8,rep,name=SourcePolicies,proto3" json:"SourcePolicies,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + Platform *pb.Platform `protobuf:"bytes,2,opt,name=Platform,proto3" json:"Platform,omitempty"` + ResolveMode string `protobuf:"bytes,3,opt,name=ResolveMode,proto3" json:"ResolveMode,omitempty"` + LogName string `protobuf:"bytes,4,opt,name=LogName,proto3" json:"LogName,omitempty"` + ResolverType int32 `protobuf:"varint,5,opt,name=ResolverType,proto3" json:"ResolverType,omitempty"` + SessionID string `protobuf:"bytes,6,opt,name=SessionID,proto3" json:"SessionID,omitempty"` + StoreID string `protobuf:"bytes,7,opt,name=StoreID,proto3" json:"StoreID,omitempty"` + SourcePolicies []*pb1.Policy `protobuf:"bytes,8,rep,name=SourcePolicies,proto3" json:"SourcePolicies,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ResolveImageConfigRequest) Reset() { @@ -849,13 +848,12 @@ func (x *ResolveImageConfigRequest) GetSourcePolicies() []*pb1.Policy { } type ResolveImageConfigResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Digest string `protobuf:"bytes,1,opt,name=Digest,proto3" json:"Digest,omitempty"` + Config []byte `protobuf:"bytes,2,opt,name=Config,proto3" json:"Config,omitempty"` + Ref string `protobuf:"bytes,3,opt,name=Ref,proto3" json:"Ref,omitempty"` unknownFields protoimpl.UnknownFields - - Digest string `protobuf:"bytes,1,opt,name=Digest,proto3" json:"Digest,omitempty"` - Config []byte `protobuf:"bytes,2,opt,name=Config,proto3" json:"Config,omitempty"` - Ref string `protobuf:"bytes,3,opt,name=Ref,proto3" json:"Ref,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ResolveImageConfigResponse) Reset() { @@ -910,15 +908,14 @@ func (x *ResolveImageConfigResponse) GetRef() string { } type ResolveSourceMetaRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Source *pb.SourceOp `protobuf:"bytes,1,opt,name=Source,proto3" json:"Source,omitempty"` - Platform *pb.Platform `protobuf:"bytes,2,opt,name=Platform,proto3" json:"Platform,omitempty"` - LogName string `protobuf:"bytes,3,opt,name=LogName,proto3" json:"LogName,omitempty"` - ResolveMode string `protobuf:"bytes,4,opt,name=ResolveMode,proto3" json:"ResolveMode,omitempty"` - SourcePolicies []*pb1.Policy `protobuf:"bytes,8,rep,name=SourcePolicies,proto3" json:"SourcePolicies,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Source *pb.SourceOp `protobuf:"bytes,1,opt,name=Source,proto3" json:"Source,omitempty"` + Platform *pb.Platform `protobuf:"bytes,2,opt,name=Platform,proto3" json:"Platform,omitempty"` + LogName string `protobuf:"bytes,3,opt,name=LogName,proto3" json:"LogName,omitempty"` + ResolveMode string `protobuf:"bytes,4,opt,name=ResolveMode,proto3" 
json:"ResolveMode,omitempty"` + SourcePolicies []*pb1.Policy `protobuf:"bytes,8,rep,name=SourcePolicies,proto3" json:"SourcePolicies,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ResolveSourceMetaRequest) Reset() { @@ -987,12 +984,11 @@ func (x *ResolveSourceMetaRequest) GetSourcePolicies() []*pb1.Policy { } type ResolveSourceMetaResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Source *pb.SourceOp `protobuf:"bytes,1,opt,name=Source,proto3" json:"Source,omitempty"` + Image *ResolveSourceImageResponse `protobuf:"bytes,2,opt,name=Image,proto3" json:"Image,omitempty"` unknownFields protoimpl.UnknownFields - - Source *pb.SourceOp `protobuf:"bytes,1,opt,name=Source,proto3" json:"Source,omitempty"` - Image *ResolveSourceImageResponse `protobuf:"bytes,2,opt,name=Image,proto3" json:"Image,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ResolveSourceMetaResponse) Reset() { @@ -1040,12 +1036,11 @@ func (x *ResolveSourceMetaResponse) GetImage() *ResolveSourceImageResponse { } type ResolveSourceImageResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Digest string `protobuf:"bytes,1,opt,name=Digest,proto3" json:"Digest,omitempty"` + Config []byte `protobuf:"bytes,2,opt,name=Config,proto3" json:"Config,omitempty"` unknownFields protoimpl.UnknownFields - - Digest string `protobuf:"bytes,1,opt,name=Digest,proto3" json:"Digest,omitempty"` - Config []byte `protobuf:"bytes,2,opt,name=Config,proto3" json:"Config,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ResolveSourceImageResponse) Reset() { @@ -1093,13 +1088,10 @@ func (x *ResolveSourceImageResponse) GetConfig() []byte { } type SolveRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Definition *pb.Definition `protobuf:"bytes,1,opt,name=Definition,proto3" json:"Definition,omitempty"` - Frontend string `protobuf:"bytes,2,opt,name=Frontend,proto3" json:"Frontend,omitempty"` - FrontendOpt map[string]string `protobuf:"bytes,3,rep,name=FrontendOpt,proto3" json:"FrontendOpt,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + state protoimpl.MessageState `protogen:"open.v1"` + Definition *pb.Definition `protobuf:"bytes,1,opt,name=Definition,proto3" json:"Definition,omitempty"` + Frontend string `protobuf:"bytes,2,opt,name=Frontend,proto3" json:"Frontend,omitempty"` + FrontendOpt map[string]string `protobuf:"bytes,3,rep,name=FrontendOpt,proto3" json:"FrontendOpt,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // 4 was removed in BuildKit v0.11.0. 
AllowResultReturn bool `protobuf:"varint,5,opt,name=allowResultReturn,proto3" json:"allowResultReturn,omitempty"` AllowResultArrayRef bool `protobuf:"varint,6,opt,name=allowResultArrayRef,proto3" json:"allowResultArrayRef,omitempty"` @@ -1110,9 +1102,11 @@ type SolveRequest struct { // apicaps:CapImportCaches CacheImports []*CacheOptionsEntry `protobuf:"bytes,12,rep,name=CacheImports,proto3" json:"CacheImports,omitempty"` // apicaps:CapFrontendInputs - FrontendInputs map[string]*pb.Definition `protobuf:"bytes,13,rep,name=FrontendInputs,proto3" json:"FrontendInputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + FrontendInputs map[string]*pb.Definition `protobuf:"bytes,13,rep,name=FrontendInputs,proto3" json:"FrontendInputs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Evaluate bool `protobuf:"varint,14,opt,name=Evaluate,proto3" json:"Evaluate,omitempty"` SourcePolicies []*pb1.Policy `protobuf:"bytes,15,rep,name=SourcePolicies,proto3" json:"SourcePolicies,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SolveRequest) Reset() { @@ -1224,12 +1218,11 @@ func (x *SolveRequest) GetSourcePolicies() []*pb1.Policy { // CacheOptionsEntry corresponds to the control.CacheOptionsEntry type CacheOptionsEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` + Attrs map[string]string `protobuf:"bytes,2,rep,name=Attrs,proto3" json:"Attrs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` - Attrs map[string]string `protobuf:"bytes,2,rep,name=Attrs,proto3" json:"Attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + sizeCache protoimpl.SizeCache } func (x *CacheOptionsEntry) Reset() { @@ -1277,14 +1270,13 @@ func (x *CacheOptionsEntry) GetAttrs() map[string]string { } type SolveResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // deprecated Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` // can be used by readfile request // these fields are returned when allowMapReturn was set - Result *Result `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` + Result *Result `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SolveResponse) Reset() { @@ -1332,13 +1324,12 @@ func (x *SolveResponse) GetResult() *Result { } type ReadFileRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + FilePath string `protobuf:"bytes,2,opt,name=FilePath,proto3" json:"FilePath,omitempty"` + Range *FileRange `protobuf:"bytes,3,opt,name=Range,proto3" json:"Range,omitempty"` unknownFields protoimpl.UnknownFields - - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` - FilePath string `protobuf:"bytes,2,opt,name=FilePath,proto3" json:"FilePath,omitempty"` - Range *FileRange `protobuf:"bytes,3,opt,name=Range,proto3" 
json:"Range,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ReadFileRequest) Reset() { @@ -1393,12 +1384,11 @@ func (x *ReadFileRequest) GetRange() *FileRange { } type FileRange struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Offset int64 `protobuf:"varint,1,opt,name=Offset,proto3" json:"Offset,omitempty"` + Length int64 `protobuf:"varint,2,opt,name=Length,proto3" json:"Length,omitempty"` unknownFields protoimpl.UnknownFields - - Offset int64 `protobuf:"varint,1,opt,name=Offset,proto3" json:"Offset,omitempty"` - Length int64 `protobuf:"varint,2,opt,name=Length,proto3" json:"Length,omitempty"` + sizeCache protoimpl.SizeCache } func (x *FileRange) Reset() { @@ -1446,11 +1436,10 @@ func (x *FileRange) GetLength() int64 { } type ReadFileResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=Data,proto3" json:"Data,omitempty"` unknownFields protoimpl.UnknownFields - - Data []byte `protobuf:"bytes,1,opt,name=Data,proto3" json:"Data,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ReadFileResponse) Reset() { @@ -1491,13 +1480,12 @@ func (x *ReadFileResponse) GetData() []byte { } type ReadDirRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` - DirPath string `protobuf:"bytes,2,opt,name=DirPath,proto3" json:"DirPath,omitempty"` - IncludePattern string `protobuf:"bytes,3,opt,name=IncludePattern,proto3" json:"IncludePattern,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + DirPath string `protobuf:"bytes,2,opt,name=DirPath,proto3" json:"DirPath,omitempty"` + IncludePattern string `protobuf:"bytes,3,opt,name=IncludePattern,proto3" json:"IncludePattern,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ReadDirRequest) Reset() { @@ -1552,11 +1540,10 @@ func (x *ReadDirRequest) GetIncludePattern() string { } type ReadDirResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Entries []*types.Stat `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` unknownFields protoimpl.UnknownFields - - Entries []*types.Stat `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ReadDirResponse) Reset() { @@ -1597,12 +1584,11 @@ func (x *ReadDirResponse) GetEntries() []*types.Stat { } type StatFileRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + Path string `protobuf:"bytes,2,opt,name=Path,proto3" json:"Path,omitempty"` unknownFields protoimpl.UnknownFields - - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` - Path string `protobuf:"bytes,2,opt,name=Path,proto3" json:"Path,omitempty"` + sizeCache protoimpl.SizeCache } func (x *StatFileRequest) Reset() { @@ -1650,11 +1636,10 @@ func (x *StatFileRequest) GetPath() string { } type StatFileResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Stat *types.Stat 
`protobuf:"bytes,1,opt,name=stat,proto3" json:"stat,omitempty"` unknownFields protoimpl.UnknownFields - - Stat *types.Stat `protobuf:"bytes,1,opt,name=stat,proto3" json:"stat,omitempty"` + sizeCache protoimpl.SizeCache } func (x *StatFileResponse) Reset() { @@ -1695,11 +1680,10 @@ func (x *StatFileResponse) GetStat() *types.Stat { } type EvaluateRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` unknownFields protoimpl.UnknownFields - - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` + sizeCache protoimpl.SizeCache } func (x *EvaluateRequest) Reset() { @@ -1740,9 +1724,9 @@ func (x *EvaluateRequest) GetRef() string { } type EvaluateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *EvaluateResponse) Reset() { @@ -1776,9 +1760,9 @@ func (*EvaluateResponse) Descriptor() ([]byte, []int) { } type PingRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PingRequest) Reset() { @@ -1812,13 +1796,12 @@ func (*PingRequest) Descriptor() ([]byte, []int) { } type PongResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` FrontendAPICaps []*pb2.APICap `protobuf:"bytes,1,rep,name=FrontendAPICaps,proto3" json:"FrontendAPICaps,omitempty"` LLBCaps []*pb2.APICap `protobuf:"bytes,2,rep,name=LLBCaps,proto3" json:"LLBCaps,omitempty"` Workers []*types1.WorkerRecord `protobuf:"bytes,3,rep,name=Workers,proto3" json:"Workers,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PongResponse) Reset() { @@ -1873,17 +1856,16 @@ func (x *PongResponse) GetWorkers() []*types1.WorkerRecord { } type WarnRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` + Level int64 `protobuf:"varint,2,opt,name=level,proto3" json:"level,omitempty"` + Short []byte `protobuf:"bytes,3,opt,name=short,proto3" json:"short,omitempty"` + Detail [][]byte `protobuf:"bytes,4,rep,name=detail,proto3" json:"detail,omitempty"` + Url string `protobuf:"bytes,5,opt,name=url,proto3" json:"url,omitempty"` + Info *pb.SourceInfo `protobuf:"bytes,6,opt,name=info,proto3" json:"info,omitempty"` + Ranges []*pb.Range `protobuf:"bytes,7,rep,name=ranges,proto3" json:"ranges,omitempty"` unknownFields protoimpl.UnknownFields - - Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` - Level int64 `protobuf:"varint,2,opt,name=level,proto3" json:"level,omitempty"` - Short []byte `protobuf:"bytes,3,opt,name=short,proto3" json:"short,omitempty"` - Detail [][]byte `protobuf:"bytes,4,rep,name=detail,proto3" json:"detail,omitempty"` - Url string `protobuf:"bytes,5,opt,name=url,proto3" json:"url,omitempty"` - Info *pb.SourceInfo `protobuf:"bytes,6,opt,name=info,proto3" json:"info,omitempty"` - Ranges []*pb.Range `protobuf:"bytes,7,rep,name=ranges,proto3" json:"ranges,omitempty"` + sizeCache protoimpl.SizeCache } func (x *WarnRequest) 
Reset() { @@ -1966,9 +1948,9 @@ func (x *WarnRequest) GetRanges() []*pb.Range { } type WarnResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *WarnResponse) Reset() { @@ -2002,18 +1984,17 @@ func (*WarnResponse) Descriptor() ([]byte, []int) { } type NewContainerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + ContainerID string `protobuf:"bytes,1,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` // For mount input values we can use random identifiers passed with ref - Mounts []*pb.Mount `protobuf:"bytes,2,rep,name=Mounts,proto3" json:"Mounts,omitempty"` - Network pb.NetMode `protobuf:"varint,3,opt,name=Network,proto3,enum=pb.NetMode" json:"Network,omitempty"` - Platform *pb.Platform `protobuf:"bytes,4,opt,name=platform,proto3" json:"platform,omitempty"` - Constraints *pb.WorkerConstraints `protobuf:"bytes,5,opt,name=constraints,proto3" json:"constraints,omitempty"` - ExtraHosts []*pb.HostIP `protobuf:"bytes,6,rep,name=extraHosts,proto3" json:"extraHosts,omitempty"` - Hostname string `protobuf:"bytes,7,opt,name=hostname,proto3" json:"hostname,omitempty"` + Mounts []*pb.Mount `protobuf:"bytes,2,rep,name=Mounts,proto3" json:"Mounts,omitempty"` + Network pb.NetMode `protobuf:"varint,3,opt,name=Network,proto3,enum=pb.NetMode" json:"Network,omitempty"` + Platform *pb.Platform `protobuf:"bytes,4,opt,name=platform,proto3" json:"platform,omitempty"` + Constraints *pb.WorkerConstraints `protobuf:"bytes,5,opt,name=constraints,proto3" json:"constraints,omitempty"` + ExtraHosts []*pb.HostIP `protobuf:"bytes,6,rep,name=extraHosts,proto3" json:"extraHosts,omitempty"` + Hostname string `protobuf:"bytes,7,opt,name=hostname,proto3" json:"hostname,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *NewContainerRequest) Reset() { @@ -2096,9 +2077,9 @@ func (x *NewContainerRequest) GetHostname() string { } type NewContainerResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *NewContainerResponse) Reset() { @@ -2132,11 +2113,10 @@ func (*NewContainerResponse) Descriptor() ([]byte, []int) { } type ReleaseContainerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ContainerID string `protobuf:"bytes,1,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ReleaseContainerRequest) Reset() { @@ -2177,9 +2157,9 @@ func (x *ReleaseContainerRequest) GetContainerID() string { } type ReleaseContainerResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ReleaseContainerResponse) Reset() { @@ -2213,12 +2193,9 @@ func (*ReleaseContainerResponse) Descriptor() ([]byte, []int) { } type ExecMessage struct { - state 
protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ProcessID string `protobuf:"bytes,1,opt,name=ProcessID,proto3" json:"ProcessID,omitempty"` - // Types that are assignable to Input: + state protoimpl.MessageState `protogen:"open.v1"` + ProcessID string `protobuf:"bytes,1,opt,name=ProcessID,proto3" json:"ProcessID,omitempty"` + // Types that are valid to be assigned to Input: // // *ExecMessage_Init // *ExecMessage_File @@ -2227,7 +2204,9 @@ type ExecMessage struct { // *ExecMessage_Exit // *ExecMessage_Done // *ExecMessage_Signal - Input isExecMessage_Input `protobuf_oneof:"Input"` + Input isExecMessage_Input `protobuf_oneof:"Input"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ExecMessage) Reset() { @@ -2267,58 +2246,72 @@ func (x *ExecMessage) GetProcessID() string { return "" } -func (m *ExecMessage) GetInput() isExecMessage_Input { - if m != nil { - return m.Input +func (x *ExecMessage) GetInput() isExecMessage_Input { + if x != nil { + return x.Input } return nil } func (x *ExecMessage) GetInit() *InitMessage { - if x, ok := x.GetInput().(*ExecMessage_Init); ok { - return x.Init + if x != nil { + if x, ok := x.Input.(*ExecMessage_Init); ok { + return x.Init + } } return nil } func (x *ExecMessage) GetFile() *FdMessage { - if x, ok := x.GetInput().(*ExecMessage_File); ok { - return x.File + if x != nil { + if x, ok := x.Input.(*ExecMessage_File); ok { + return x.File + } } return nil } func (x *ExecMessage) GetResize() *ResizeMessage { - if x, ok := x.GetInput().(*ExecMessage_Resize); ok { - return x.Resize + if x != nil { + if x, ok := x.Input.(*ExecMessage_Resize); ok { + return x.Resize + } } return nil } func (x *ExecMessage) GetStarted() *StartedMessage { - if x, ok := x.GetInput().(*ExecMessage_Started); ok { - return x.Started + if x != nil { + if x, ok := x.Input.(*ExecMessage_Started); ok { + return x.Started + } } return nil } func (x *ExecMessage) GetExit() *ExitMessage { - if x, ok := x.GetInput().(*ExecMessage_Exit); ok { - return x.Exit + if x != nil { + if x, ok := x.Input.(*ExecMessage_Exit); ok { + return x.Exit + } } return nil } func (x *ExecMessage) GetDone() *DoneMessage { - if x, ok := x.GetInput().(*ExecMessage_Done); ok { - return x.Done + if x != nil { + if x, ok := x.Input.(*ExecMessage_Done); ok { + return x.Done + } } return nil } func (x *ExecMessage) GetSignal() *SignalMessage { - if x, ok := x.GetInput().(*ExecMessage_Signal); ok { - return x.Signal + if x != nil { + if x, ok := x.Input.(*ExecMessage_Signal); ok { + return x.Signal + } } return nil } @@ -2382,16 +2375,15 @@ func (*ExecMessage_Done) isExecMessage_Input() {} func (*ExecMessage_Signal) isExecMessage_Input() {} type InitMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ContainerID string `protobuf:"bytes,1,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` + Meta *pb.Meta `protobuf:"bytes,2,opt,name=Meta,proto3" json:"Meta,omitempty"` + Fds []uint32 `protobuf:"varint,3,rep,packed,name=Fds,proto3" json:"Fds,omitempty"` + Tty bool `protobuf:"varint,4,opt,name=Tty,proto3" json:"Tty,omitempty"` + Security pb.SecurityMode `protobuf:"varint,5,opt,name=Security,proto3,enum=pb.SecurityMode" json:"Security,omitempty"` + Secretenv []*pb.SecretEnv `protobuf:"bytes,6,rep,name=secretenv,proto3" json:"secretenv,omitempty"` unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=ContainerID,proto3" 
json:"ContainerID,omitempty"` - Meta *pb.Meta `protobuf:"bytes,2,opt,name=Meta,proto3" json:"Meta,omitempty"` - Fds []uint32 `protobuf:"varint,3,rep,packed,name=Fds,proto3" json:"Fds,omitempty"` - Tty bool `protobuf:"varint,4,opt,name=Tty,proto3" json:"Tty,omitempty"` - Security pb.SecurityMode `protobuf:"varint,5,opt,name=Security,proto3,enum=pb.SecurityMode" json:"Security,omitempty"` - Secretenv []*pb.SecretEnv `protobuf:"bytes,6,rep,name=secretenv,proto3" json:"secretenv,omitempty"` + sizeCache protoimpl.SizeCache } func (x *InitMessage) Reset() { @@ -2467,12 +2459,11 @@ func (x *InitMessage) GetSecretenv() []*pb.SecretEnv { } type ExitMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Code uint32 `protobuf:"varint,1,opt,name=Code,proto3" json:"Code,omitempty"` + Error *status.Status `protobuf:"bytes,2,opt,name=Error,proto3" json:"Error,omitempty"` unknownFields protoimpl.UnknownFields - - Code uint32 `protobuf:"varint,1,opt,name=Code,proto3" json:"Code,omitempty"` - Error *status.Status `protobuf:"bytes,2,opt,name=Error,proto3" json:"Error,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ExitMessage) Reset() { @@ -2520,9 +2511,9 @@ func (x *ExitMessage) GetError() *status.Status { } type StartedMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StartedMessage) Reset() { @@ -2556,9 +2547,9 @@ func (*StartedMessage) Descriptor() ([]byte, []int) { } type DoneMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DoneMessage) Reset() { @@ -2592,13 +2583,12 @@ func (*DoneMessage) Descriptor() ([]byte, []int) { } type FdMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Fd uint32 `protobuf:"varint,1,opt,name=Fd,proto3" json:"Fd,omitempty"` // what fd the data was from + EOF bool `protobuf:"varint,2,opt,name=EOF,proto3" json:"EOF,omitempty"` // true if eof was reached + Data []byte `protobuf:"bytes,3,opt,name=Data,proto3" json:"Data,omitempty"` unknownFields protoimpl.UnknownFields - - Fd uint32 `protobuf:"varint,1,opt,name=Fd,proto3" json:"Fd,omitempty"` // what fd the data was from - EOF bool `protobuf:"varint,2,opt,name=EOF,proto3" json:"EOF,omitempty"` // true if eof was reached - Data []byte `protobuf:"bytes,3,opt,name=Data,proto3" json:"Data,omitempty"` + sizeCache protoimpl.SizeCache } func (x *FdMessage) Reset() { @@ -2653,12 +2643,11 @@ func (x *FdMessage) GetData() []byte { } type ResizeMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Rows uint32 `protobuf:"varint,1,opt,name=Rows,proto3" json:"Rows,omitempty"` + Cols uint32 `protobuf:"varint,2,opt,name=Cols,proto3" json:"Cols,omitempty"` unknownFields protoimpl.UnknownFields - - Rows uint32 `protobuf:"varint,1,opt,name=Rows,proto3" json:"Rows,omitempty"` - Cols uint32 `protobuf:"varint,2,opt,name=Cols,proto3" json:"Cols,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ResizeMessage) Reset() { @@ -2706,13 +2695,12 @@ func (x *ResizeMessage) GetCols() uint32 { } type SignalMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - 
unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // we only send name (ie HUP, INT) because the int values // are platform dependent. - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SignalMessage) Reset() { @@ -2754,536 +2742,234 @@ func (x *SignalMessage) GetName() string { var File_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto protoreflect.FileDescriptor -var file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDesc = []byte{ - 0x0a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, - 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x66, 0x72, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x64, 0x2f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x70, 0x62, 0x2f, 0x67, - 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, 0x6d, 0x6f, - 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, - 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x1a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, - 0x74, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x77, 0x6f, 0x72, 0x6b, - 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, - 0x69, 0x74, 0x2f, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2f, 0x70, 0x62, 0x2f, 0x6f, 0x70, 0x73, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, - 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2f, 0x70, 0x62, - 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x33, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, 0x2f, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x75, 0x74, 0x69, 0x6c, 0x2f, 0x61, 0x70, 0x69, - 0x63, 0x61, 0x70, 0x73, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x61, 0x70, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x1a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x74, - 0x6f, 0x6e, 0x69, 0x73, 0x74, 0x69, 0x69, 0x67, 0x69, 0x2f, 0x66, 0x73, 0x75, 0x74, 0x69, 0x6c, - 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcb, 0x04, 0x0a, 0x06, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x26, 0x0a, 0x0d, 0x72, 0x65, 0x66, 0x44, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0d, - 0x72, 0x65, 0x66, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x55, 0x0a, - 0x0e, 0x72, 0x65, 0x66, 0x73, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x64, 0x2e, 0x52, 
0x65, 0x66, 0x4d, 0x61, 0x70, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x48, 0x00, 0x52, 0x0e, 0x72, 0x65, 0x66, 0x73, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x12, 0x32, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, - 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x52, 0x65, - 0x66, 0x48, 0x00, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12, 0x37, 0x0a, 0x04, 0x72, 0x65, 0x66, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x66, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x04, 0x72, 0x65, 0x66, - 0x73, 0x12, 0x4b, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, - 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x57, - 0x0a, 0x0c, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, - 0x2e, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x61, 0x74, 0x74, 0x65, 0x73, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x68, 0x0a, 0x11, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6d, 0x6f, 0x62, - 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x08, - 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x96, 0x01, 0x0a, 0x10, 0x52, 0x65, 0x66, - 0x4d, 0x61, 0x70, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x49, 0x0a, - 0x04, 0x72, 0x65, 0x66, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x6d, 0x6f, - 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, - 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x66, 0x4d, 0x61, 0x70, 0x44, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x52, 0x65, 0x66, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x04, 0x72, 0x65, 0x66, 
0x73, 0x1a, 0x37, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x22, 0x37, 0x0a, 0x03, 0x52, 0x65, 0x66, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x03, 0x64, 0x65, 0x66, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x64, 0x65, 0x66, 0x22, 0xa2, 0x01, 0x0a, 0x06, 0x52, - 0x65, 0x66, 0x4d, 0x61, 0x70, 0x12, 0x3f, 0x0a, 0x04, 0x72, 0x65, 0x66, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, - 0x52, 0x65, 0x66, 0x4d, 0x61, 0x70, 0x2e, 0x52, 0x65, 0x66, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x04, 0x72, 0x65, 0x66, 0x73, 0x1a, 0x57, 0x0a, 0x09, 0x52, 0x65, 0x66, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, - 0x2e, 0x52, 0x65, 0x66, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, - 0x58, 0x0a, 0x0c, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, - 0x48, 0x0a, 0x0b, 0x61, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, - 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x61, 0x74, - 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa6, 0x03, 0x0a, 0x0b, 0x41, 0x74, - 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x04, 0x6b, 0x69, 0x6e, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x64, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, - 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x50, 0x0a, 0x08, 0x6d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x6d, 0x6f, - 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, - 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x41, 0x74, 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x30, 0x0a, 0x03, 0x72, - 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x66, 0x52, 0x03, 0x72, 
0x65, 0x66, 0x12, 0x12, 0x0a, - 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x12, 0x30, 0x0a, 0x13, 0x69, 0x6e, 0x54, 0x6f, 0x74, 0x6f, 0x50, 0x72, 0x65, 0x64, 0x69, - 0x63, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, - 0x69, 0x6e, 0x54, 0x6f, 0x74, 0x6f, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x50, 0x0a, 0x0e, 0x69, 0x6e, 0x54, 0x6f, 0x74, 0x6f, 0x53, 0x75, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6d, 0x6f, - 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, - 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x49, 0x6e, 0x54, 0x6f, 0x74, 0x6f, 0x53, 0x75, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0e, 0x69, 0x6e, 0x54, 0x6f, 0x74, 0x6f, 0x53, 0x75, 0x62, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x22, 0x7d, 0x0a, 0x0d, 0x49, 0x6e, 0x54, 0x6f, 0x74, 0x6f, 0x53, 0x75, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x12, 0x40, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x2c, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, - 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x49, 0x6e, - 0x54, 0x6f, 0x74, 0x6f, 0x53, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4b, 0x69, 0x6e, 0x64, 0x52, - 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x22, 0x74, 0x0a, 0x0d, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x39, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, - 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x28, 0x0a, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x10, 0x0a, 0x0e, 0x52, 0x65, 0x74, 0x75, 0x72, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x0f, 0x0a, 0x0d, 0x49, 0x6e, 0x70, - 0x75, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xbe, 0x01, 0x0a, 0x0e, 0x49, - 0x6e, 0x70, 0x75, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5c, 0x0a, - 0x0b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, - 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x49, - 0x6e, 0x70, 0x75, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 
0x65, - 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, - 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x4e, 0x0a, 0x10, 0x44, - 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xbe, 0x02, 0x0a, 0x19, - 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x52, 0x65, 0x66, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x28, 0x0a, 0x08, 0x50, - 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, - 0x70, 0x62, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x08, 0x50, 0x6c, 0x61, - 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x20, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, - 0x4d, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x52, 0x65, 0x73, 0x6f, - 0x6c, 0x76, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x4c, 0x6f, 0x67, 0x4e, 0x61, - 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4c, 0x6f, 0x67, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x54, 0x79, 0x70, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, - 0x72, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x49, 0x44, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x44, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x49, 0x44, 0x12, 0x4d, 0x0a, - 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, - 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x70, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0e, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x22, 0x5e, 0x0a, 0x1a, - 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x69, - 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x69, 0x67, 0x65, - 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x52, 0x65, - 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x22, 0xf5, 0x01, 0x0a, - 0x18, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, - 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x06, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x53, - 0x6f, 0x75, 0x72, 
0x63, 0x65, 0x4f, 0x70, 0x52, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, - 0x28, 0x0a, 0x08, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x52, - 0x08, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x4c, 0x6f, 0x67, - 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x4c, 0x6f, 0x67, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x4d, 0x6f, - 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, - 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x4d, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, - 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, - 0x2e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x6c, 0x69, - 0x63, 0x69, 0x65, 0x73, 0x22, 0x8e, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x70, - 0x52, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x4b, 0x0a, 0x05, 0x49, 0x6d, 0x61, 0x67, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, - 0x49, 0x6d, 0x61, 0x67, 0x65, 0x22, 0x4c, 0x0a, 0x1a, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x22, 0x85, 0x06, 0x0a, 0x0c, 0x53, 0x6f, 0x6c, 0x76, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x0a, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, - 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, - 0x12, 0x5a, 0x0a, 0x0b, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x4f, 0x70, 0x74, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, - 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x64, 0x2e, 0x53, 0x6f, 0x6c, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, - 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 
0x4f, 0x70, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x0b, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x4f, 0x70, 0x74, 0x12, 0x2c, 0x0a, 0x11, - 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x74, 0x75, 0x72, - 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x12, 0x30, 0x0a, 0x13, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x41, 0x72, 0x72, 0x61, 0x79, 0x52, 0x65, - 0x66, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x41, 0x72, 0x72, 0x61, 0x79, 0x52, 0x65, 0x66, 0x12, 0x14, 0x0a, 0x05, - 0x46, 0x69, 0x6e, 0x61, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x46, 0x69, 0x6e, - 0x61, 0x6c, 0x12, 0x22, 0x0a, 0x0c, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x72, 0x41, 0x74, - 0x74, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, - 0x65, 0x72, 0x41, 0x74, 0x74, 0x72, 0x12, 0x50, 0x0a, 0x0c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x49, - 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6d, - 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, - 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x43, 0x61, 0x63, 0x68, - 0x65, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x63, 0x0a, 0x0e, 0x46, 0x72, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x3b, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, - 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x53, 0x6f, 0x6c, - 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x46, - 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x1a, 0x0a, - 0x08, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65, 0x12, 0x4d, 0x0a, 0x0e, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, - 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x46, 0x72, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x64, 0x4f, 0x70, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x51, 0x0a, 0x13, 0x46, 0x72, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x64, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb0, 0x01, 0x0a, 0x11, - 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4d, 0x0a, 0x05, 0x41, 0x74, 0x74, 0x72, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, - 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x41, - 0x74, 0x74, 0x72, 0x73, 0x1a, 0x38, 0x0a, 0x0a, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x5c, - 0x0a, 0x0d, 0x53, 0x6f, 0x6c, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x10, 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, - 0x66, 0x12, 0x39, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x21, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, - 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x7b, 0x0a, 0x0f, - 0x52, 0x65, 0x61, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x10, 0x0a, 0x03, 0x52, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, - 0x66, 0x12, 0x1a, 0x0a, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x3a, 0x0a, - 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6d, - 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, - 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x52, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x3b, 0x0a, 0x09, 0x46, 0x69, 0x6c, - 0x65, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x4f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, - 0x0a, 0x06, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, - 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x26, 0x0a, 0x10, 0x52, 0x65, 0x61, 0x64, 0x46, 0x69, - 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, - 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0x64, - 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x69, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x10, 0x0a, 0x03, 0x52, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, - 0x65, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x44, 0x69, 0x72, 0x50, 0x61, 0x74, 0x68, 0x18, 0x02, 
0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x44, 0x69, 0x72, 0x50, 0x61, 0x74, 0x68, 0x12, 0x26, 0x0a, 0x0e, - 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x61, 0x74, - 0x74, 0x65, 0x72, 0x6e, 0x22, 0x3f, 0x0a, 0x0f, 0x52, 0x65, 0x61, 0x64, 0x44, 0x69, 0x72, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, - 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x66, 0x73, 0x75, 0x74, 0x69, - 0x6c, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x52, 0x07, 0x65, 0x6e, - 0x74, 0x72, 0x69, 0x65, 0x73, 0x22, 0x37, 0x0a, 0x0f, 0x53, 0x74, 0x61, 0x74, 0x46, 0x69, 0x6c, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x52, 0x65, 0x66, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x12, 0x0a, 0x04, 0x50, 0x61, - 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x50, 0x61, 0x74, 0x68, 0x22, 0x3a, - 0x0a, 0x10, 0x53, 0x74, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x26, 0x0a, 0x04, 0x73, 0x74, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x12, 0x2e, 0x66, 0x73, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, - 0x53, 0x74, 0x61, 0x74, 0x52, 0x04, 0x73, 0x74, 0x61, 0x74, 0x22, 0x23, 0x0a, 0x0f, 0x45, 0x76, - 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, - 0x03, 0x52, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x22, - 0x12, 0x0a, 0x10, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x0d, 0x0a, 0x0b, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x22, 0xd6, 0x01, 0x0a, 0x0c, 0x50, 0x6f, 0x6e, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x0f, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x41, - 0x50, 0x49, 0x43, 0x61, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6d, - 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, - 0x61, 0x70, 0x69, 0x63, 0x61, 0x70, 0x73, 0x2e, 0x41, 0x50, 0x49, 0x43, 0x61, 0x70, 0x52, 0x0f, - 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x41, 0x50, 0x49, 0x43, 0x61, 0x70, 0x73, 0x12, - 0x3a, 0x0a, 0x07, 0x4c, 0x4c, 0x42, 0x43, 0x61, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x20, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, - 0x2e, 0x76, 0x31, 0x2e, 0x61, 0x70, 0x69, 0x63, 0x61, 0x70, 0x73, 0x2e, 0x41, 0x50, 0x49, 0x43, - 0x61, 0x70, 0x52, 0x07, 0x4c, 0x4c, 0x42, 0x43, 0x61, 0x70, 0x73, 0x12, 0x3e, 0x0a, 0x07, 0x57, - 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6d, - 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x63, 0x6f, - 0x72, 0x64, 0x52, 0x07, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x22, 0xc2, 0x01, 0x0a, 0x0b, - 0x57, 0x61, 0x72, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, - 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, - 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x03, 0x52, 
0x05, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x6f, - 0x72, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x12, - 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0c, 0x52, - 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x22, 0x0a, 0x04, 0x69, 0x6e, 0x66, - 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x21, 0x0a, - 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, 0x2e, - 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, - 0x22, 0x0e, 0x0a, 0x0c, 0x57, 0x61, 0x72, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0xac, 0x02, 0x0a, 0x13, 0x4e, 0x65, 0x77, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x21, 0x0a, 0x06, 0x4d, 0x6f, - 0x75, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x62, 0x2e, - 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x06, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x25, 0x0a, - 0x07, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, - 0x2e, 0x70, 0x62, 0x2e, 0x4e, 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x07, 0x4e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x12, 0x28, 0x0a, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x6c, 0x61, 0x74, - 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x37, - 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x43, - 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x73, - 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x2a, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x72, 0x61, - 0x48, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, - 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x49, 0x50, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x72, 0x61, 0x48, 0x6f, - 0x73, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x22, - 0x16, 0x0a, 0x14, 0x4e, 0x65, 0x77, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3b, 0x0a, 0x17, 0x52, 0x65, 0x6c, 0x65, 0x61, - 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, - 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x49, 0x44, 0x22, 0x1a, 0x0a, 0x18, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x43, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0xf9, 0x03, 0x0a, 0x0b, 0x45, 0x78, 
0x65, 0x63, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x1c, 0x0a, 0x09, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x44, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x44, 0x12, 0x3c, - 0x0a, 0x04, 0x49, 0x6e, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6d, - 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, - 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x04, 0x49, 0x6e, 0x69, 0x74, 0x12, 0x3a, 0x0a, 0x04, - 0x46, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6d, 0x6f, 0x62, - 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x46, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x48, 0x00, 0x52, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x42, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x69, - 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x48, 0x00, 0x52, 0x06, 0x52, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x45, 0x0a, 0x07, - 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, - 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, - 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, - 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x07, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x65, 0x64, 0x12, 0x3c, 0x0a, 0x04, 0x45, 0x78, 0x69, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x26, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, - 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x45, 0x78, - 0x69, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x04, 0x45, 0x78, 0x69, - 0x74, 0x12, 0x3c, 0x0a, 0x04, 0x44, 0x6f, 0x6e, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x26, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, - 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x44, 0x6f, 0x6e, 0x65, - 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x04, 0x44, 0x6f, 0x6e, 0x65, 0x12, - 0x42, 0x0a, 0x06, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x28, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, - 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x53, 0x69, 0x67, 0x6e, - 0x61, 0x6c, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x48, 0x00, 0x52, 0x06, 0x53, 0x69, 0x67, - 0x6e, 0x61, 0x6c, 0x42, 0x07, 0x0a, 0x05, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x22, 0xcc, 0x01, 0x0a, - 0x0b, 0x49, 0x6e, 0x69, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x20, 0x0a, 0x0b, - 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x1c, - 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, - 0x62, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x04, 0x4d, 0x65, 0x74, 
0x61, 0x12, 0x10, 0x0a, 0x03, - 0x46, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0d, 0x52, 0x03, 0x46, 0x64, 0x73, 0x12, 0x10, - 0x0a, 0x03, 0x54, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x54, 0x74, 0x79, - 0x12, 0x2c, 0x0a, 0x08, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, - 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x08, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x2b, - 0x0a, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x76, 0x18, 0x06, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x45, 0x6e, 0x76, - 0x52, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x76, 0x22, 0x4b, 0x0a, 0x0b, 0x45, - 0x78, 0x69, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x43, 0x6f, - 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x28, - 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x10, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x72, - 0x74, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x0d, 0x0a, 0x0b, 0x44, 0x6f, - 0x6e, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x41, 0x0a, 0x09, 0x46, 0x64, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x46, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x02, 0x46, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x45, 0x4f, 0x46, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x03, 0x45, 0x4f, 0x46, 0x12, 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, 0x37, 0x0a, 0x0d, - 0x52, 0x65, 0x73, 0x69, 0x7a, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x52, 0x6f, 0x77, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x52, 0x6f, 0x77, - 0x73, 0x12, 0x12, 0x0a, 0x04, 0x43, 0x6f, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, - 0x04, 0x43, 0x6f, 0x6c, 0x73, 0x22, 0x23, 0x0a, 0x0d, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x6c, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x2a, 0x29, 0x0a, 0x0f, 0x41, 0x74, - 0x74, 0x65, 0x73, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x0a, 0x0a, - 0x06, 0x49, 0x6e, 0x54, 0x6f, 0x74, 0x6f, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x42, 0x75, 0x6e, - 0x64, 0x6c, 0x65, 0x10, 0x01, 0x2a, 0x26, 0x0a, 0x11, 0x49, 0x6e, 0x54, 0x6f, 0x74, 0x6f, 0x53, - 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x65, - 0x6c, 0x66, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x52, 0x61, 0x77, 0x10, 0x01, 0x32, 0xbd, 0x0b, - 0x0a, 0x09, 0x4c, 0x4c, 0x42, 0x42, 0x72, 0x69, 0x64, 0x67, 0x65, 0x12, 0x81, 0x01, 0x0a, 0x12, - 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x34, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, - 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x52, - 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 
0x2e, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x49, 0x6d, 0x61, 0x67, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x7e, 0x0a, 0x11, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x4d, 0x65, 0x74, 0x61, 0x12, 0x33, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, - 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, - 0x74, 0x61, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x6d, 0x6f, 0x62, 0x79, - 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x5a, 0x0a, 0x05, 0x53, 0x6f, 0x6c, 0x76, 0x65, 0x12, 0x27, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x53, 0x6f, 0x6c, 0x76, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x28, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, - 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x53, 0x6f, - 0x6c, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x08, 0x52, - 0x65, 0x61, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x2a, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, - 0x65, 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, - 0x52, 0x65, 0x61, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x60, 0x0a, 0x07, 0x52, 0x65, 0x61, 0x64, 0x44, 0x69, 0x72, 0x12, 0x29, 0x2e, 0x6d, 0x6f, - 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, - 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x69, 0x72, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x69, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x63, 0x0a, 0x08, 0x53, 0x74, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x2a, - 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, - 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x46, - 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x6d, 0x6f, 0x62, - 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x63, 0x0a, 0x08, 0x45, 0x76, 0x61, 0x6c, 0x75, - 0x61, 0x74, 0x65, 
0x12, 0x2a, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, - 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2b, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, - 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x45, 0x76, 0x61, 0x6c, - 0x75, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x57, 0x0a, 0x04, - 0x50, 0x69, 0x6e, 0x67, 0x12, 0x26, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, - 0x2e, 0x50, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x6d, - 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, - 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x50, 0x6f, 0x6e, 0x67, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x06, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x12, - 0x28, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, - 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x74, 0x75, - 0x72, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6d, 0x6f, 0x62, 0x79, - 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, - 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x06, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x28, - 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, - 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x6f, 0x0a, 0x0c, 0x4e, 0x65, 0x77, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x12, 0x2e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, - 0x4e, 0x65, 0x77, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, - 0x4e, 0x65, 0x77, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x10, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x43, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x12, 0x32, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x43, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x6d, - 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, - 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 
0x64, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, - 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x61, 0x0a, 0x0b, 0x45, 0x78, 0x65, 0x63, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, - 0x12, 0x26, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, - 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x45, 0x78, 0x65, - 0x63, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x26, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, - 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x28, 0x01, 0x30, 0x01, 0x12, 0x57, 0x0a, 0x04, 0x57, 0x61, 0x72, 0x6e, 0x12, 0x26, 0x2e, 0x6d, - 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, - 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x2e, 0x57, 0x61, 0x72, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, - 0x2e, 0x57, 0x61, 0x72, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x48, 0x5a, - 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, - 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x66, 0x72, 0x6f, 0x6e, 0x74, 0x65, - 0x6e, 0x64, 0x2f, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x70, 0x62, 0x3b, 0x6d, 0x6f, - 0x62, 0x79, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x5f, 0x76, 0x31, 0x5f, 0x66, - 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDesc = "" + + "\n" + + ":github.com/moby/buildkit/frontend/gateway/pb/gateway.proto\x12\x19moby.buildkit.v1.frontend\x1a/github.com/moby/buildkit/api/types/worker.proto\x1a,github.com/moby/buildkit/solver/pb/ops.proto\x1a5github.com/moby/buildkit/sourcepolicy/pb/policy.proto\x1a3github.com/moby/buildkit/util/apicaps/pb/caps.proto\x1a-github.com/tonistiigi/fsutil/types/stat.proto\x1a\x17google/rpc/status.proto\"\xcb\x04\n" + + "\x06Result\x12&\n" + + "\rrefDeprecated\x18\x01 \x01(\tH\x00R\rrefDeprecated\x12U\n" + + "\x0erefsDeprecated\x18\x02 \x01(\v2+.moby.buildkit.v1.frontend.RefMapDeprecatedH\x00R\x0erefsDeprecated\x122\n" + + "\x03ref\x18\x03 \x01(\v2\x1e.moby.buildkit.v1.frontend.RefH\x00R\x03ref\x127\n" + + "\x04refs\x18\x04 \x01(\v2!.moby.buildkit.v1.frontend.RefMapH\x00R\x04refs\x12K\n" + + "\bmetadata\x18\n" + + " \x03(\v2/.moby.buildkit.v1.frontend.Result.MetadataEntryR\bmetadata\x12W\n" + + "\fattestations\x18\f \x03(\v23.moby.buildkit.v1.frontend.Result.AttestationsEntryR\fattestations\x1a;\n" + + "\rMetadataEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value:\x028\x01\x1ah\n" + + "\x11AttestationsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12=\n" + + "\x05value\x18\x02 \x01(\v2'.moby.buildkit.v1.frontend.AttestationsR\x05value:\x028\x01B\b\n" + + "\x06result\"\x96\x01\n" + + "\x10RefMapDeprecated\x12I\n" + + "\x04refs\x18\x01 \x03(\v25.moby.buildkit.v1.frontend.RefMapDeprecated.RefsEntryR\x04refs\x1a7\n" + + "\tRefsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"7\n" + + "\x03Ref\x12\x0e\n" + + 
"\x02id\x18\x01 \x01(\tR\x02id\x12 \n" + + "\x03def\x18\x02 \x01(\v2\x0e.pb.DefinitionR\x03def\"\xa2\x01\n" + + "\x06RefMap\x12?\n" + + "\x04refs\x18\x01 \x03(\v2+.moby.buildkit.v1.frontend.RefMap.RefsEntryR\x04refs\x1aW\n" + + "\tRefsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x124\n" + + "\x05value\x18\x02 \x01(\v2\x1e.moby.buildkit.v1.frontend.RefR\x05value:\x028\x01\"X\n" + + "\fAttestations\x12H\n" + + "\vattestation\x18\x01 \x03(\v2&.moby.buildkit.v1.frontend.AttestationR\vattestation\"\xa6\x03\n" + + "\vAttestation\x12>\n" + + "\x04kind\x18\x01 \x01(\x0e2*.moby.buildkit.v1.frontend.AttestationKindR\x04kind\x12P\n" + + "\bmetadata\x18\x02 \x03(\v24.moby.buildkit.v1.frontend.Attestation.MetadataEntryR\bmetadata\x120\n" + + "\x03ref\x18\x03 \x01(\v2\x1e.moby.buildkit.v1.frontend.RefR\x03ref\x12\x12\n" + + "\x04path\x18\x04 \x01(\tR\x04path\x120\n" + + "\x13inTotoPredicateType\x18\x05 \x01(\tR\x13inTotoPredicateType\x12P\n" + + "\x0einTotoSubjects\x18\x06 \x03(\v2(.moby.buildkit.v1.frontend.InTotoSubjectR\x0einTotoSubjects\x1a;\n" + + "\rMetadataEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05value:\x028\x01\"}\n" + + "\rInTotoSubject\x12@\n" + + "\x04kind\x18\x01 \x01(\x0e2,.moby.buildkit.v1.frontend.InTotoSubjectKindR\x04kind\x12\x16\n" + + "\x06digest\x18\x02 \x03(\tR\x06digest\x12\x12\n" + + "\x04name\x18\x03 \x01(\tR\x04name\"t\n" + + "\rReturnRequest\x129\n" + + "\x06result\x18\x01 \x01(\v2!.moby.buildkit.v1.frontend.ResultR\x06result\x12(\n" + + "\x05error\x18\x02 \x01(\v2\x12.google.rpc.StatusR\x05error\"\x10\n" + + "\x0eReturnResponse\"\x0f\n" + + "\rInputsRequest\"\xbe\x01\n" + + "\x0eInputsResponse\x12\\\n" + + "\vDefinitions\x18\x01 \x03(\v2:.moby.buildkit.v1.frontend.InputsResponse.DefinitionsEntryR\vDefinitions\x1aN\n" + + "\x10DefinitionsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12$\n" + + "\x05value\x18\x02 \x01(\v2\x0e.pb.DefinitionR\x05value:\x028\x01\"\xbe\x02\n" + + "\x19ResolveImageConfigRequest\x12\x10\n" + + "\x03Ref\x18\x01 \x01(\tR\x03Ref\x12(\n" + + "\bPlatform\x18\x02 \x01(\v2\f.pb.PlatformR\bPlatform\x12 \n" + + "\vResolveMode\x18\x03 \x01(\tR\vResolveMode\x12\x18\n" + + "\aLogName\x18\x04 \x01(\tR\aLogName\x12\"\n" + + "\fResolverType\x18\x05 \x01(\x05R\fResolverType\x12\x1c\n" + + "\tSessionID\x18\x06 \x01(\tR\tSessionID\x12\x18\n" + + "\aStoreID\x18\a \x01(\tR\aStoreID\x12M\n" + + "\x0eSourcePolicies\x18\b \x03(\v2%.moby.buildkit.v1.sourcepolicy.PolicyR\x0eSourcePolicies\"^\n" + + "\x1aResolveImageConfigResponse\x12\x16\n" + + "\x06Digest\x18\x01 \x01(\tR\x06Digest\x12\x16\n" + + "\x06Config\x18\x02 \x01(\fR\x06Config\x12\x10\n" + + "\x03Ref\x18\x03 \x01(\tR\x03Ref\"\xf5\x01\n" + + "\x18ResolveSourceMetaRequest\x12$\n" + + "\x06Source\x18\x01 \x01(\v2\f.pb.SourceOpR\x06Source\x12(\n" + + "\bPlatform\x18\x02 \x01(\v2\f.pb.PlatformR\bPlatform\x12\x18\n" + + "\aLogName\x18\x03 \x01(\tR\aLogName\x12 \n" + + "\vResolveMode\x18\x04 \x01(\tR\vResolveMode\x12M\n" + + "\x0eSourcePolicies\x18\b \x03(\v2%.moby.buildkit.v1.sourcepolicy.PolicyR\x0eSourcePolicies\"\x8e\x01\n" + + "\x19ResolveSourceMetaResponse\x12$\n" + + "\x06Source\x18\x01 \x01(\v2\f.pb.SourceOpR\x06Source\x12K\n" + + "\x05Image\x18\x02 \x01(\v25.moby.buildkit.v1.frontend.ResolveSourceImageResponseR\x05Image\"L\n" + + "\x1aResolveSourceImageResponse\x12\x16\n" + + "\x06Digest\x18\x01 \x01(\tR\x06Digest\x12\x16\n" + + "\x06Config\x18\x02 \x01(\fR\x06Config\"\x85\x06\n" + + "\fSolveRequest\x12.\n" + + "\n" + + "Definition\x18\x01 
\x01(\v2\x0e.pb.DefinitionR\n" + + "Definition\x12\x1a\n" + + "\bFrontend\x18\x02 \x01(\tR\bFrontend\x12Z\n" + + "\vFrontendOpt\x18\x03 \x03(\v28.moby.buildkit.v1.frontend.SolveRequest.FrontendOptEntryR\vFrontendOpt\x12,\n" + + "\x11allowResultReturn\x18\x05 \x01(\bR\x11allowResultReturn\x120\n" + + "\x13allowResultArrayRef\x18\x06 \x01(\bR\x13allowResultArrayRef\x12\x14\n" + + "\x05Final\x18\n" + + " \x01(\bR\x05Final\x12\"\n" + + "\fExporterAttr\x18\v \x01(\fR\fExporterAttr\x12P\n" + + "\fCacheImports\x18\f \x03(\v2,.moby.buildkit.v1.frontend.CacheOptionsEntryR\fCacheImports\x12c\n" + + "\x0eFrontendInputs\x18\r \x03(\v2;.moby.buildkit.v1.frontend.SolveRequest.FrontendInputsEntryR\x0eFrontendInputs\x12\x1a\n" + + "\bEvaluate\x18\x0e \x01(\bR\bEvaluate\x12M\n" + + "\x0eSourcePolicies\x18\x0f \x03(\v2%.moby.buildkit.v1.sourcepolicy.PolicyR\x0eSourcePolicies\x1a>\n" + + "\x10FrontendOptEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\x1aQ\n" + + "\x13FrontendInputsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12$\n" + + "\x05value\x18\x02 \x01(\v2\x0e.pb.DefinitionR\x05value:\x028\x01\"\xb0\x01\n" + + "\x11CacheOptionsEntry\x12\x12\n" + + "\x04Type\x18\x01 \x01(\tR\x04Type\x12M\n" + + "\x05Attrs\x18\x02 \x03(\v27.moby.buildkit.v1.frontend.CacheOptionsEntry.AttrsEntryR\x05Attrs\x1a8\n" + + "\n" + + "AttrsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\\\n" + + "\rSolveResponse\x12\x10\n" + + "\x03ref\x18\x01 \x01(\tR\x03ref\x129\n" + + "\x06result\x18\x03 \x01(\v2!.moby.buildkit.v1.frontend.ResultR\x06result\"{\n" + + "\x0fReadFileRequest\x12\x10\n" + + "\x03Ref\x18\x01 \x01(\tR\x03Ref\x12\x1a\n" + + "\bFilePath\x18\x02 \x01(\tR\bFilePath\x12:\n" + + "\x05Range\x18\x03 \x01(\v2$.moby.buildkit.v1.frontend.FileRangeR\x05Range\";\n" + + "\tFileRange\x12\x16\n" + + "\x06Offset\x18\x01 \x01(\x03R\x06Offset\x12\x16\n" + + "\x06Length\x18\x02 \x01(\x03R\x06Length\"&\n" + + "\x10ReadFileResponse\x12\x12\n" + + "\x04Data\x18\x01 \x01(\fR\x04Data\"d\n" + + "\x0eReadDirRequest\x12\x10\n" + + "\x03Ref\x18\x01 \x01(\tR\x03Ref\x12\x18\n" + + "\aDirPath\x18\x02 \x01(\tR\aDirPath\x12&\n" + + "\x0eIncludePattern\x18\x03 \x01(\tR\x0eIncludePattern\"?\n" + + "\x0fReadDirResponse\x12,\n" + + "\aentries\x18\x01 \x03(\v2\x12.fsutil.types.StatR\aentries\"7\n" + + "\x0fStatFileRequest\x12\x10\n" + + "\x03Ref\x18\x01 \x01(\tR\x03Ref\x12\x12\n" + + "\x04Path\x18\x02 \x01(\tR\x04Path\":\n" + + "\x10StatFileResponse\x12&\n" + + "\x04stat\x18\x01 \x01(\v2\x12.fsutil.types.StatR\x04stat\"#\n" + + "\x0fEvaluateRequest\x12\x10\n" + + "\x03Ref\x18\x01 \x01(\tR\x03Ref\"\x12\n" + + "\x10EvaluateResponse\"\r\n" + + "\vPingRequest\"\xd6\x01\n" + + "\fPongResponse\x12J\n" + + "\x0fFrontendAPICaps\x18\x01 \x03(\v2 .moby.buildkit.v1.apicaps.APICapR\x0fFrontendAPICaps\x12:\n" + + "\aLLBCaps\x18\x02 \x03(\v2 .moby.buildkit.v1.apicaps.APICapR\aLLBCaps\x12>\n" + + "\aWorkers\x18\x03 \x03(\v2$.moby.buildkit.v1.types.WorkerRecordR\aWorkers\"\xc2\x01\n" + + "\vWarnRequest\x12\x16\n" + + "\x06digest\x18\x01 \x01(\tR\x06digest\x12\x14\n" + + "\x05level\x18\x02 \x01(\x03R\x05level\x12\x14\n" + + "\x05short\x18\x03 \x01(\fR\x05short\x12\x16\n" + + "\x06detail\x18\x04 \x03(\fR\x06detail\x12\x10\n" + + "\x03url\x18\x05 \x01(\tR\x03url\x12\"\n" + + "\x04info\x18\x06 \x01(\v2\x0e.pb.SourceInfoR\x04info\x12!\n" + + "\x06ranges\x18\a \x03(\v2\t.pb.RangeR\x06ranges\"\x0e\n" + + 
"\fWarnResponse\"\xac\x02\n" + + "\x13NewContainerRequest\x12 \n" + + "\vContainerID\x18\x01 \x01(\tR\vContainerID\x12!\n" + + "\x06Mounts\x18\x02 \x03(\v2\t.pb.MountR\x06Mounts\x12%\n" + + "\aNetwork\x18\x03 \x01(\x0e2\v.pb.NetModeR\aNetwork\x12(\n" + + "\bplatform\x18\x04 \x01(\v2\f.pb.PlatformR\bplatform\x127\n" + + "\vconstraints\x18\x05 \x01(\v2\x15.pb.WorkerConstraintsR\vconstraints\x12*\n" + + "\n" + + "extraHosts\x18\x06 \x03(\v2\n" + + ".pb.HostIPR\n" + + "extraHosts\x12\x1a\n" + + "\bhostname\x18\a \x01(\tR\bhostname\"\x16\n" + + "\x14NewContainerResponse\";\n" + + "\x17ReleaseContainerRequest\x12 \n" + + "\vContainerID\x18\x01 \x01(\tR\vContainerID\"\x1a\n" + + "\x18ReleaseContainerResponse\"\xf9\x03\n" + + "\vExecMessage\x12\x1c\n" + + "\tProcessID\x18\x01 \x01(\tR\tProcessID\x12<\n" + + "\x04Init\x18\x02 \x01(\v2&.moby.buildkit.v1.frontend.InitMessageH\x00R\x04Init\x12:\n" + + "\x04File\x18\x03 \x01(\v2$.moby.buildkit.v1.frontend.FdMessageH\x00R\x04File\x12B\n" + + "\x06Resize\x18\x04 \x01(\v2(.moby.buildkit.v1.frontend.ResizeMessageH\x00R\x06Resize\x12E\n" + + "\aStarted\x18\x05 \x01(\v2).moby.buildkit.v1.frontend.StartedMessageH\x00R\aStarted\x12<\n" + + "\x04Exit\x18\x06 \x01(\v2&.moby.buildkit.v1.frontend.ExitMessageH\x00R\x04Exit\x12<\n" + + "\x04Done\x18\a \x01(\v2&.moby.buildkit.v1.frontend.DoneMessageH\x00R\x04Done\x12B\n" + + "\x06Signal\x18\b \x01(\v2(.moby.buildkit.v1.frontend.SignalMessageH\x00R\x06SignalB\a\n" + + "\x05Input\"\xcc\x01\n" + + "\vInitMessage\x12 \n" + + "\vContainerID\x18\x01 \x01(\tR\vContainerID\x12\x1c\n" + + "\x04Meta\x18\x02 \x01(\v2\b.pb.MetaR\x04Meta\x12\x10\n" + + "\x03Fds\x18\x03 \x03(\rR\x03Fds\x12\x10\n" + + "\x03Tty\x18\x04 \x01(\bR\x03Tty\x12,\n" + + "\bSecurity\x18\x05 \x01(\x0e2\x10.pb.SecurityModeR\bSecurity\x12+\n" + + "\tsecretenv\x18\x06 \x03(\v2\r.pb.SecretEnvR\tsecretenv\"K\n" + + "\vExitMessage\x12\x12\n" + + "\x04Code\x18\x01 \x01(\rR\x04Code\x12(\n" + + "\x05Error\x18\x02 \x01(\v2\x12.google.rpc.StatusR\x05Error\"\x10\n" + + "\x0eStartedMessage\"\r\n" + + "\vDoneMessage\"A\n" + + "\tFdMessage\x12\x0e\n" + + "\x02Fd\x18\x01 \x01(\rR\x02Fd\x12\x10\n" + + "\x03EOF\x18\x02 \x01(\bR\x03EOF\x12\x12\n" + + "\x04Data\x18\x03 \x01(\fR\x04Data\"7\n" + + "\rResizeMessage\x12\x12\n" + + "\x04Rows\x18\x01 \x01(\rR\x04Rows\x12\x12\n" + + "\x04Cols\x18\x02 \x01(\rR\x04Cols\"#\n" + + "\rSignalMessage\x12\x12\n" + + "\x04Name\x18\x01 \x01(\tR\x04Name*)\n" + + "\x0fAttestationKind\x12\n" + + "\n" + + "\x06InToto\x10\x00\x12\n" + + "\n" + + "\x06Bundle\x10\x01*&\n" + + "\x11InTotoSubjectKind\x12\b\n" + + "\x04Self\x10\x00\x12\a\n" + + "\x03Raw\x10\x012\xbd\v\n" + + "\tLLBBridge\x12\x81\x01\n" + + "\x12ResolveImageConfig\x124.moby.buildkit.v1.frontend.ResolveImageConfigRequest\x1a5.moby.buildkit.v1.frontend.ResolveImageConfigResponse\x12~\n" + + "\x11ResolveSourceMeta\x123.moby.buildkit.v1.frontend.ResolveSourceMetaRequest\x1a4.moby.buildkit.v1.frontend.ResolveSourceMetaResponse\x12Z\n" + + "\x05Solve\x12'.moby.buildkit.v1.frontend.SolveRequest\x1a(.moby.buildkit.v1.frontend.SolveResponse\x12c\n" + + "\bReadFile\x12*.moby.buildkit.v1.frontend.ReadFileRequest\x1a+.moby.buildkit.v1.frontend.ReadFileResponse\x12`\n" + + "\aReadDir\x12).moby.buildkit.v1.frontend.ReadDirRequest\x1a*.moby.buildkit.v1.frontend.ReadDirResponse\x12c\n" + + "\bStatFile\x12*.moby.buildkit.v1.frontend.StatFileRequest\x1a+.moby.buildkit.v1.frontend.StatFileResponse\x12c\n" + + 
"\bEvaluate\x12*.moby.buildkit.v1.frontend.EvaluateRequest\x1a+.moby.buildkit.v1.frontend.EvaluateResponse\x12W\n" + + "\x04Ping\x12&.moby.buildkit.v1.frontend.PingRequest\x1a'.moby.buildkit.v1.frontend.PongResponse\x12]\n" + + "\x06Return\x12(.moby.buildkit.v1.frontend.ReturnRequest\x1a).moby.buildkit.v1.frontend.ReturnResponse\x12]\n" + + "\x06Inputs\x12(.moby.buildkit.v1.frontend.InputsRequest\x1a).moby.buildkit.v1.frontend.InputsResponse\x12o\n" + + "\fNewContainer\x12..moby.buildkit.v1.frontend.NewContainerRequest\x1a/.moby.buildkit.v1.frontend.NewContainerResponse\x12{\n" + + "\x10ReleaseContainer\x122.moby.buildkit.v1.frontend.ReleaseContainerRequest\x1a3.moby.buildkit.v1.frontend.ReleaseContainerResponse\x12a\n" + + "\vExecProcess\x12&.moby.buildkit.v1.frontend.ExecMessage\x1a&.moby.buildkit.v1.frontend.ExecMessage(\x010\x01\x12W\n" + + "\x04Warn\x12&.moby.buildkit.v1.frontend.WarnRequest\x1a'.moby.buildkit.v1.frontend.WarnResponseBHZFgithub.com/moby/buildkit/frontend/gateway/pb;moby_buildkit_v1_frontendb\x06proto3" var ( file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDescOnce sync.Once - file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDescData = file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDesc + file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDescData []byte ) func file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDescGZIP() []byte { file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDescOnce.Do(func() { - file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDescData) + file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDesc), len(file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDesc))) }) return file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDescData } @@ -3483,7 +3169,7 @@ func file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDesc), len(file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDesc)), NumEnums: 2, NumMessages: 53, NumExtensions: 0, @@ -3495,7 +3181,6 @@ func file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_init() { MessageInfos: file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_msgTypes, }.Build() File_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto = out.File - file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_rawDesc = nil file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_goTypes = nil file_github_com_moby_buildkit_frontend_gateway_pb_gateway_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/subrequests/lint/lint.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/subrequests/lint/lint.go index 45a4c0eeb..3b1e980ee 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/subrequests/lint/lint.go +++ 
b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/frontend/subrequests/lint/lint.go @@ -134,7 +134,7 @@ func (results *LintResults) ToResult(scb SourceInfoMap) (*client.Result, error) if len(results.Warnings) > 0 || results.Error != nil { status = 1 } - res.AddMeta("result.statuscode", []byte(fmt.Sprintf("%d", status))) + res.AddMeta("result.statuscode", fmt.Appendf(nil, "%d", status)) res.AddMeta("version", []byte(SubrequestLintDefinition.Version)) return res, nil diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/auth/auth.pb.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/auth/auth.pb.go index 30913d3e8..27dbd9861 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/auth/auth.pb.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/auth/auth.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.11.4 // source: github.com/moby/buildkit/session/auth/auth.proto @@ -11,6 +11,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -21,11 +22,10 @@ const ( ) type CredentialsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` unknownFields protoimpl.UnknownFields - - Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` + sizeCache protoimpl.SizeCache } func (x *CredentialsRequest) Reset() { @@ -66,12 +66,11 @@ func (x *CredentialsRequest) GetHost() string { } type CredentialsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Username string `protobuf:"bytes,1,opt,name=Username,proto3" json:"Username,omitempty"` + Secret string `protobuf:"bytes,2,opt,name=Secret,proto3" json:"Secret,omitempty"` unknownFields protoimpl.UnknownFields - - Username string `protobuf:"bytes,1,opt,name=Username,proto3" json:"Username,omitempty"` - Secret string `protobuf:"bytes,2,opt,name=Secret,proto3" json:"Secret,omitempty"` + sizeCache protoimpl.SizeCache } func (x *CredentialsResponse) Reset() { @@ -119,15 +118,14 @@ func (x *CredentialsResponse) GetSecret() string { } type FetchTokenRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ClientID string `protobuf:"bytes,1,opt,name=ClientID,proto3" json:"ClientID,omitempty"` + Host string `protobuf:"bytes,2,opt,name=Host,proto3" json:"Host,omitempty"` + Realm string `protobuf:"bytes,3,opt,name=Realm,proto3" json:"Realm,omitempty"` + Service string `protobuf:"bytes,4,opt,name=Service,proto3" json:"Service,omitempty"` + Scopes []string `protobuf:"bytes,5,rep,name=Scopes,proto3" json:"Scopes,omitempty"` unknownFields protoimpl.UnknownFields - - ClientID string `protobuf:"bytes,1,opt,name=ClientID,proto3" json:"ClientID,omitempty"` - Host string `protobuf:"bytes,2,opt,name=Host,proto3" json:"Host,omitempty"` - Realm string `protobuf:"bytes,3,opt,name=Realm,proto3" json:"Realm,omitempty"` - Service string `protobuf:"bytes,4,opt,name=Service,proto3" json:"Service,omitempty"` - Scopes []string `protobuf:"bytes,5,rep,name=Scopes,proto3" json:"Scopes,omitempty"` + sizeCache protoimpl.SizeCache } func (x *FetchTokenRequest) Reset() { @@ -196,13 +194,12 @@ func (x *FetchTokenRequest) GetScopes() []string { } 
type FetchTokenResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` + ExpiresIn int64 `protobuf:"varint,2,opt,name=ExpiresIn,proto3" json:"ExpiresIn,omitempty"` // seconds + IssuedAt int64 `protobuf:"varint,3,opt,name=IssuedAt,proto3" json:"IssuedAt,omitempty"` // timestamp unknownFields protoimpl.UnknownFields - - Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` - ExpiresIn int64 `protobuf:"varint,2,opt,name=ExpiresIn,proto3" json:"ExpiresIn,omitempty"` // seconds - IssuedAt int64 `protobuf:"varint,3,opt,name=IssuedAt,proto3" json:"IssuedAt,omitempty"` // timestamp + sizeCache protoimpl.SizeCache } func (x *FetchTokenResponse) Reset() { @@ -257,12 +254,11 @@ func (x *FetchTokenResponse) GetIssuedAt() int64 { } type GetTokenAuthorityRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` + Salt []byte `protobuf:"bytes,2,opt,name=Salt,proto3" json:"Salt,omitempty"` unknownFields protoimpl.UnknownFields - - Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` - Salt []byte `protobuf:"bytes,2,opt,name=Salt,proto3" json:"Salt,omitempty"` + sizeCache protoimpl.SizeCache } func (x *GetTokenAuthorityRequest) Reset() { @@ -310,11 +306,10 @@ func (x *GetTokenAuthorityRequest) GetSalt() []byte { } type GetTokenAuthorityResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + PublicKey []byte `protobuf:"bytes,1,opt,name=PublicKey,proto3" json:"PublicKey,omitempty"` unknownFields protoimpl.UnknownFields - - PublicKey []byte `protobuf:"bytes,1,opt,name=PublicKey,proto3" json:"PublicKey,omitempty"` + sizeCache protoimpl.SizeCache } func (x *GetTokenAuthorityResponse) Reset() { @@ -355,13 +350,12 @@ func (x *GetTokenAuthorityResponse) GetPublicKey() []byte { } type VerifyTokenAuthorityRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` + Payload []byte `protobuf:"bytes,2,opt,name=Payload,proto3" json:"Payload,omitempty"` + Salt []byte `protobuf:"bytes,3,opt,name=Salt,proto3" json:"Salt,omitempty"` unknownFields protoimpl.UnknownFields - - Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` - Payload []byte `protobuf:"bytes,2,opt,name=Payload,proto3" json:"Payload,omitempty"` - Salt []byte `protobuf:"bytes,3,opt,name=Salt,proto3" json:"Salt,omitempty"` + sizeCache protoimpl.SizeCache } func (x *VerifyTokenAuthorityRequest) Reset() { @@ -416,11 +410,10 @@ func (x *VerifyTokenAuthorityRequest) GetSalt() []byte { } type VerifyTokenAuthorityResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Signed []byte `protobuf:"bytes,1,opt,name=Signed,proto3" json:"Signed,omitempty"` unknownFields protoimpl.UnknownFields - - Signed []byte `protobuf:"bytes,1,opt,name=Signed,proto3" json:"Signed,omitempty"` + sizeCache protoimpl.SizeCache } func (x *VerifyTokenAuthorityResponse) Reset() { @@ -462,91 +455,50 @@ func (x *VerifyTokenAuthorityResponse) GetSigned() []byte { var File_github_com_moby_buildkit_session_auth_auth_proto 
protoreflect.FileDescriptor -var file_github_com_moby_buildkit_session_auth_auth_proto_rawDesc = []byte{ - 0x0a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, - 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x10, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x6e, - 0x63, 0x2e, 0x76, 0x31, 0x22, 0x28, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x61, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x6f, - 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x22, 0x49, - 0x0a, 0x13, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x55, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x22, 0x8b, 0x01, 0x0a, 0x11, 0x46, 0x65, - 0x74, 0x63, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x1a, 0x0a, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x48, - 0x6f, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x12, - 0x14, 0x0a, 0x05, 0x52, 0x65, 0x61, 0x6c, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x52, 0x65, 0x61, 0x6c, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x06, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x22, 0x64, 0x0a, 0x12, 0x46, 0x65, 0x74, 0x63, 0x68, - 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x54, 0x6f, - 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x49, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x49, - 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x49, 0x73, 0x73, 0x75, 0x65, 0x64, 0x41, 0x74, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x08, 0x49, 0x73, 0x73, 0x75, 0x65, 0x64, 0x41, 0x74, 0x22, 0x42, 0x0a, - 0x18, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, - 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x6f, 0x73, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, - 0x04, 0x53, 0x61, 0x6c, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x53, 0x61, 0x6c, - 0x74, 0x22, 0x39, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x75, 0x74, - 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, - 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x22, 0x5f, 0x0a, 0x1b, - 0x56, 0x65, 0x72, 
0x69, 0x66, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, - 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x48, - 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x12, - 0x18, 0x0a, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x53, 0x61, 0x6c, - 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x53, 0x61, 0x6c, 0x74, 0x22, 0x36, 0x0a, - 0x1c, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x75, 0x74, 0x68, - 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x53, - 0x69, 0x67, 0x6e, 0x65, 0x64, 0x32, 0xa0, 0x03, 0x0a, 0x04, 0x41, 0x75, 0x74, 0x68, 0x12, 0x5a, - 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x12, 0x24, 0x2e, - 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x76, 0x31, - 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x6c, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, - 0x79, 0x6e, 0x63, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x61, - 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x57, 0x0a, 0x0a, 0x46, 0x65, - 0x74, 0x63, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x23, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, - 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x65, 0x74, 0x63, - 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, - 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x76, 0x31, - 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x6c, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, - 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, - 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x66, 0x69, 0x6c, 0x65, - 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x75, 0x0a, 0x14, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x2d, 0x2e, 0x6d, 0x6f, 0x62, 0x79, - 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, - 0x69, 0x66, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, - 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x69, - 0x66, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x74, 0x79, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x27, 0x5a, 0x25, 0x67, 0x69, 0x74, 0x68, - 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 
0x6d, 0x6f, 0x62, 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x61, 0x75, 0x74, - 0x68, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_github_com_moby_buildkit_session_auth_auth_proto_rawDesc = "" + + "\n" + + "0github.com/moby/buildkit/session/auth/auth.proto\x12\x10moby.filesync.v1\"(\n" + + "\x12CredentialsRequest\x12\x12\n" + + "\x04Host\x18\x01 \x01(\tR\x04Host\"I\n" + + "\x13CredentialsResponse\x12\x1a\n" + + "\bUsername\x18\x01 \x01(\tR\bUsername\x12\x16\n" + + "\x06Secret\x18\x02 \x01(\tR\x06Secret\"\x8b\x01\n" + + "\x11FetchTokenRequest\x12\x1a\n" + + "\bClientID\x18\x01 \x01(\tR\bClientID\x12\x12\n" + + "\x04Host\x18\x02 \x01(\tR\x04Host\x12\x14\n" + + "\x05Realm\x18\x03 \x01(\tR\x05Realm\x12\x18\n" + + "\aService\x18\x04 \x01(\tR\aService\x12\x16\n" + + "\x06Scopes\x18\x05 \x03(\tR\x06Scopes\"d\n" + + "\x12FetchTokenResponse\x12\x14\n" + + "\x05Token\x18\x01 \x01(\tR\x05Token\x12\x1c\n" + + "\tExpiresIn\x18\x02 \x01(\x03R\tExpiresIn\x12\x1a\n" + + "\bIssuedAt\x18\x03 \x01(\x03R\bIssuedAt\"B\n" + + "\x18GetTokenAuthorityRequest\x12\x12\n" + + "\x04Host\x18\x01 \x01(\tR\x04Host\x12\x12\n" + + "\x04Salt\x18\x02 \x01(\fR\x04Salt\"9\n" + + "\x19GetTokenAuthorityResponse\x12\x1c\n" + + "\tPublicKey\x18\x01 \x01(\fR\tPublicKey\"_\n" + + "\x1bVerifyTokenAuthorityRequest\x12\x12\n" + + "\x04Host\x18\x01 \x01(\tR\x04Host\x12\x18\n" + + "\aPayload\x18\x02 \x01(\fR\aPayload\x12\x12\n" + + "\x04Salt\x18\x03 \x01(\fR\x04Salt\"6\n" + + "\x1cVerifyTokenAuthorityResponse\x12\x16\n" + + "\x06Signed\x18\x01 \x01(\fR\x06Signed2\xa0\x03\n" + + "\x04Auth\x12Z\n" + + "\vCredentials\x12$.moby.filesync.v1.CredentialsRequest\x1a%.moby.filesync.v1.CredentialsResponse\x12W\n" + + "\n" + + "FetchToken\x12#.moby.filesync.v1.FetchTokenRequest\x1a$.moby.filesync.v1.FetchTokenResponse\x12l\n" + + "\x11GetTokenAuthority\x12*.moby.filesync.v1.GetTokenAuthorityRequest\x1a+.moby.filesync.v1.GetTokenAuthorityResponse\x12u\n" + + "\x14VerifyTokenAuthority\x12-.moby.filesync.v1.VerifyTokenAuthorityRequest\x1a..moby.filesync.v1.VerifyTokenAuthorityResponseB'Z%github.com/moby/buildkit/session/authb\x06proto3" var ( file_github_com_moby_buildkit_session_auth_auth_proto_rawDescOnce sync.Once - file_github_com_moby_buildkit_session_auth_auth_proto_rawDescData = file_github_com_moby_buildkit_session_auth_auth_proto_rawDesc + file_github_com_moby_buildkit_session_auth_auth_proto_rawDescData []byte ) func file_github_com_moby_buildkit_session_auth_auth_proto_rawDescGZIP() []byte { file_github_com_moby_buildkit_session_auth_auth_proto_rawDescOnce.Do(func() { - file_github_com_moby_buildkit_session_auth_auth_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_moby_buildkit_session_auth_auth_proto_rawDescData) + file_github_com_moby_buildkit_session_auth_auth_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_session_auth_auth_proto_rawDesc), len(file_github_com_moby_buildkit_session_auth_auth_proto_rawDesc))) }) return file_github_com_moby_buildkit_session_auth_auth_proto_rawDescData } @@ -587,7 +539,7 @@ func file_github_com_moby_buildkit_session_auth_auth_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_moby_buildkit_session_auth_auth_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_session_auth_auth_proto_rawDesc), 
len(file_github_com_moby_buildkit_session_auth_auth_proto_rawDesc)), NumEnums: 0, NumMessages: 8, NumExtensions: 0, @@ -598,7 +550,6 @@ func file_github_com_moby_buildkit_session_auth_auth_proto_init() { MessageInfos: file_github_com_moby_buildkit_session_auth_auth_proto_msgTypes, }.Build() File_github_com_moby_buildkit_session_auth_auth_proto = out.File - file_github_com_moby_buildkit_session_auth_auth_proto_rawDesc = nil file_github_com_moby_buildkit_session_auth_auth_proto_goTypes = nil file_github_com_moby_buildkit_session_auth_auth_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go index 0280e96dc..88fe7d02b 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go @@ -8,6 +8,7 @@ import ( "crypto/tls" "crypto/x509" "fmt" + "net/http" "os" "runtime" "strconv" @@ -20,7 +21,7 @@ import ( "github.com/docker/cli/cli/config" "github.com/docker/cli/cli/config/configfile" "github.com/docker/cli/cli/config/types" - http "github.com/hashicorp/go-cleanhttp" + cleanhttp "github.com/hashicorp/go-cleanhttp" "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/auth" "github.com/moby/buildkit/util/progress/progresswriter" @@ -45,7 +46,7 @@ type DockerAuthProviderConfig struct { TLSConfigs map[string]*AuthTLSConfig // ExpireCachedAuth is a function that returns true auth config should be refreshed // instead of using a pre-cached result. - // If nil then the cached result will expire after 10 minutes. + // If nil then the cached result will expire after 4 minutes and 50 seconds. // The function is called with the time the cached auth config was created // and the server URL the auth config is for. ExpireCachedAuth func(created time.Time, serverURL string) bool @@ -59,7 +60,8 @@ type authConfigCacheEntry struct { func NewDockerAuthProvider(cfg DockerAuthProviderConfig) session.Attachable { if cfg.ExpireCachedAuth == nil { cfg.ExpireCachedAuth = func(created time.Time, _ string) bool { - return time.Since(created) > 10*time.Minute + // Tokens for Google Artifact Registry via Workload Identity expire after 5 minutes. + return time.Since(created) > 4*time.Minute+50*time.Second } } return &authProvider{ @@ -124,7 +126,7 @@ func (ap *authProvider) FetchToken(ctx context.Context, req *auth.FetchTokenRequ httpClient := tracing.DefaultClient if tc, err := ap.tlsConfig(req.Host); err == nil && tc != nil { - transport := http.DefaultTransport() + transport := cleanhttp.DefaultTransport() transport.TLSClientConfig = tc httpClient.Transport = tracing.NewTransport(transport) } @@ -150,7 +152,7 @@ func (ap *authProvider) FetchToken(ctx context.Context, req *auth.FetchTokenRequ // Registries without support for POST may return 404 for POST /v2/token. // As of September 2017, GCR is known to return 404. // As of February 2018, JFrog Artifactory is known to return 401. 
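// The authprovider hunks above shorten the default auth-cache window from 10
// minutes to 4m50s, so cached Google Artifact Registry tokens (which expire
// after 5 minutes) are refreshed before they go stale, and rename the
// go-cleanhttp import to cleanhttp so the standard net/http status constants
// can be used alongside it. Callers that want the previous behaviour can still
// supply their own expiry policy; a sketch using only the identifiers shown in
// these hunks:
//
//	attachable := authprovider.NewDockerAuthProvider(authprovider.DockerAuthProviderConfig{
//		ExpireCachedAuth: func(created time.Time, _ string) bool {
//			return time.Since(created) > 10*time.Minute
//		},
//	})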
- if (errStatus.StatusCode == 405 && to.Username != "") || errStatus.StatusCode == 404 || errStatus.StatusCode == 401 { + if (errStatus.StatusCode == http.StatusMethodNotAllowed && to.Username != "") || errStatus.StatusCode == http.StatusNotFound || errStatus.StatusCode == http.StatusUnauthorized { resp, err := authutil.FetchToken(ctx, httpClient, nil, to) if err != nil { return nil, err diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go index 1f276c770..5e8b40b93 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go @@ -17,8 +17,8 @@ import ( type Stream interface { Context() context.Context - SendMsg(m interface{}) error - RecvMsg(m interface{}) error + SendMsg(m any) error + RecvMsg(m any) error } func newStreamWriter(stream grpc.ClientStream) io.WriteCloser { @@ -32,7 +32,7 @@ type bufferedWriteCloser struct { } func (bwc *bufferedWriteCloser) Close() error { - if err := bwc.Writer.Flush(); err != nil { + if err := bwc.Flush(); err != nil { return errors.WithStack(err) } return bwc.Closer.Close() @@ -59,10 +59,10 @@ func (wc *streamWriterCloser) Write(dt []byte) (int, error) { return n1 + n2, nil } - if err := wc.ClientStream.SendMsg(&BytesMessage{Data: dt}); err != nil { + if err := wc.SendMsg(&BytesMessage{Data: dt}); err != nil { // SendMsg return EOF on remote errors if errors.Is(err, io.EOF) { - if err := errors.WithStack(wc.ClientStream.RecvMsg(struct{}{})); err != nil { + if err := errors.WithStack(wc.RecvMsg(struct{}{})); err != nil { return 0, err } } @@ -72,18 +72,18 @@ func (wc *streamWriterCloser) Write(dt []byte) (int, error) { } func (wc *streamWriterCloser) Close() error { - if err := wc.ClientStream.CloseSend(); err != nil { + if err := wc.CloseSend(); err != nil { return errors.WithStack(err) } // block until receiver is done var bm BytesMessage - if err := wc.ClientStream.RecvMsg(&bm); err != io.EOF { + if err := wc.RecvMsg(&bm); !errors.Is(err, io.EOF) { return errors.WithStack(err) } return nil } -func recvDiffCopy(ds grpc.ClientStream, dest string, cu CacheUpdater, progress progressCb, differ fsutil.DiffType, filter func(string, *fstypes.Stat) bool) (err error) { +func recvDiffCopy(ds grpc.ClientStream, dest string, cu CacheUpdater, progress progressCb, differ fsutil.DiffType, filter, metadataOnlyFilter func(string, *fstypes.Stat) bool) (err error) { st := time.Now() defer func() { bklog.G(ds.Context()).Debugf("diffcopy took: %v", time.Since(st)) @@ -107,6 +107,7 @@ func recvDiffCopy(ds grpc.ClientStream, dest string, cu CacheUpdater, progress p ProgressCb: progress, Filter: fsutil.FilterFunc(filter), Differ: differ, + MetadataOnly: metadataOnlyFilter, })) } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/filesync/filesync.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/filesync/filesync.go index 4c7e4b233..230493f16 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/filesync/filesync.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/filesync/filesync.go @@ -3,7 +3,7 @@ package filesync import ( "context" "fmt" - io "io" + "io" "net/url" "os" "strconv" @@ -145,7 +145,7 @@ type progressCb func(int, bool) type protocol struct { name string sendFn func(stream Stream, fs fsutil.FS, progress progressCb) error - recvFn func(stream grpc.ClientStream, destDir string, 
cu CacheUpdater, progress progressCb, differ fsutil.DiffType, mapFunc func(string, *fstypes.Stat) bool) error + recvFn func(stream grpc.ClientStream, destDir string, cu CacheUpdater, progress progressCb, differ fsutil.DiffType, mapFunc, metadataOnlyFilter func(string, *fstypes.Stat) bool) error } var supportedProtocols = []protocol{ @@ -158,15 +158,17 @@ var supportedProtocols = []protocol{ // FSSendRequestOpt defines options for FSSend request type FSSendRequestOpt struct { - Name string - IncludePatterns []string - ExcludePatterns []string - FollowPaths []string - DestDir string - CacheUpdater CacheUpdater - ProgressCb func(int, bool) - Filter func(string, *fstypes.Stat) bool - Differ fsutil.DiffType + Name string + IncludePatterns []string + ExcludePatterns []string + FollowPaths []string + DestDir string + CacheUpdater CacheUpdater + ProgressCb func(int, bool) + Filter func(string, *fstypes.Stat) bool + Differ fsutil.DiffType + MetadataOnly bool + MetadataOnlyFilter func(string, *fstypes.Stat) bool } // CacheUpdater is an object capable of sending notifications for the cache hash changes @@ -233,7 +235,16 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error { panic(fmt.Sprintf("invalid protocol: %q", pr.name)) } - return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater, opt.ProgressCb, opt.Differ, opt.Filter) + var metadataOnlyFilter func(string, *fstypes.Stat) bool + if opt.MetadataOnly { + if opt.MetadataOnlyFilter != nil { + metadataOnlyFilter = opt.MetadataOnlyFilter + } else { + metadataOnlyFilter = func(string, *fstypes.Stat) bool { return false } + } + } + + return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater, opt.ProgressCb, opt.Differ, opt.Filter, metadataOnlyFilter) } type FSSyncTarget interface { @@ -264,34 +275,39 @@ func WithFSSyncDir(id int, outdir string) FSSyncTarget { } } -func NewFSSyncTarget(targets ...FSSyncTarget) session.Attachable { - fs := make(map[int]FileOutputFunc) - outdirs := make(map[int]string) - for _, t := range targets { - t := t.target() - if t.f != nil { - fs[t.id] = t.f - } - if t.outdir != "" { - outdirs[t.id] = t.outdir - } - } - return &fsSyncAttachable{ - fs: fs, - outdirs: outdirs, +func NewFSSyncTarget(targets ...FSSyncTarget) *SyncTarget { + st := &SyncTarget{ + fs: make(map[int]FileOutputFunc), + outdirs: make(map[int]string), } + st.Add(targets...) 
+ return st } -type fsSyncAttachable struct { +type SyncTarget struct { fs map[int]FileOutputFunc outdirs map[int]string } -func (sp *fsSyncAttachable) Register(server *grpc.Server) { +var _ session.Attachable = &SyncTarget{} + +func (sp *SyncTarget) Add(targets ...FSSyncTarget) { + for _, t := range targets { + t := t.target() + if t.f != nil { + sp.fs[t.id] = t.f + } + if t.outdir != "" { + sp.outdirs[t.id] = t.outdir + } + } +} + +func (sp *SyncTarget) Register(server *grpc.Server) { RegisterFileSendServer(server, sp) } -func (sp *fsSyncAttachable) chooser(ctx context.Context) int { +func (sp *SyncTarget) chooser(ctx context.Context) int { md, ok := metadata.FromIncomingContext(ctx) if !ok { return 0 @@ -307,7 +323,7 @@ func (sp *fsSyncAttachable) chooser(ctx context.Context) int { return int(id) } -func (sp *fsSyncAttachable) DiffCopy(stream FileSend_DiffCopyServer) (err error) { +func (sp *SyncTarget) DiffCopy(stream FileSend_DiffCopyServer) (err error) { id := sp.chooser(stream.Context()) if outdir, ok := sp.outdirs[id]; ok { return syncTargetDiffCopy(stream, outdir) @@ -320,8 +336,8 @@ func (sp *fsSyncAttachable) DiffCopy(stream FileSend_DiffCopyServer) (err error) opts, _ := metadata.FromIncomingContext(stream.Context()) // if no metadata continue with empty object md := map[string]string{} for k, v := range opts { - if strings.HasPrefix(k, keyExporterMetaPrefix) { - md[strings.TrimPrefix(k, keyExporterMetaPrefix)] = strings.Join(v, ",") + if after, ok0 := strings.CutPrefix(k, keyExporterMetaPrefix); ok0 { + md[after] = strings.Join(v, ",") } } wc, err := f(md) diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go index 2a51550b7..7f98c7d3e 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
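// In the filesync hunks above, the unexported fsSyncAttachable type becomes the
// exported SyncTarget, and NewFSSyncTarget now returns *SyncTarget, which still
// satisfies session.Attachable and gains an Add method for registering further
// outputs after construction; FSSendRequestOpt also picks up MetadataOnly and
// MetadataOnlyFilter options. A usage sketch built only from identifiers visible
// in these hunks; the output directories are placeholders:
//
//	st := filesync.NewFSSyncTarget(filesync.WithFSSyncDir(0, "/tmp/build-out"))
//	st.Add(filesync.WithFSSyncDir(1, "/tmp/extra-out"))
//	var _ session.Attachable = st
//
// The filesync.pb.go hunks that follow are, like auth.pb.go earlier, the
// mechanical result of regenerating with protoc-gen-go v1.36.6.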
// versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.11.4 // source: github.com/moby/buildkit/session/filesync/filesync.proto @@ -12,6 +12,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -23,11 +24,10 @@ const ( // BytesMessage contains a chunk of byte data type BytesMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` unknownFields protoimpl.UnknownFields - - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + sizeCache protoimpl.SizeCache } func (x *BytesMessage) Reset() { @@ -69,45 +69,25 @@ func (x *BytesMessage) GetData() []byte { var File_github_com_moby_buildkit_session_filesync_filesync_proto protoreflect.FileDescriptor -var file_github_com_moby_buildkit_session_filesync_filesync_proto_rawDesc = []byte{ - 0x0a, 0x38, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, - 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x2f, 0x66, 0x69, 0x6c, 0x65, - 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x6d, 0x6f, 0x62, 0x79, - 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x76, 0x31, 0x1a, 0x2d, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x74, 0x6f, 0x6e, 0x69, 0x73, 0x74, 0x69, - 0x69, 0x67, 0x69, 0x2f, 0x66, 0x73, 0x75, 0x74, 0x69, 0x6c, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2f, 0x77, 0x69, 0x72, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x22, 0x0a, 0x0c, 0x42, - 0x79, 0x74, 0x65, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x32, - 0x83, 0x01, 0x0a, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x3a, 0x0a, 0x08, - 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x14, 0x2e, 0x66, 0x73, 0x75, 0x74, 0x69, - 0x6c, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x14, - 0x2e, 0x66, 0x73, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x61, - 0x63, 0x6b, 0x65, 0x74, 0x28, 0x01, 0x30, 0x01, 0x12, 0x3b, 0x0a, 0x09, 0x54, 0x61, 0x72, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x14, 0x2e, 0x66, 0x73, 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x74, - 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x1a, 0x14, 0x2e, 0x66, 0x73, - 0x75, 0x74, 0x69, 0x6c, 0x2e, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x65, - 0x74, 0x28, 0x01, 0x30, 0x01, 0x32, 0x5a, 0x0a, 0x08, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x65, 0x6e, - 0x64, 0x12, 0x4e, 0x0a, 0x08, 0x44, 0x69, 0x66, 0x66, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x1e, 0x2e, - 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x76, 0x31, - 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1e, 0x2e, - 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x2e, 0x76, 0x31, - 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x28, 0x01, 0x30, - 0x01, 0x42, 0x2b, 0x5a, 0x29, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x6d, 0x6f, 0x62, 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 
0x65, - 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_github_com_moby_buildkit_session_filesync_filesync_proto_rawDesc = "" + + "\n" + + "8github.com/moby/buildkit/session/filesync/filesync.proto\x12\x10moby.filesync.v1\x1a-github.com/tonistiigi/fsutil/types/wire.proto\"\"\n" + + "\fBytesMessage\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04data2\x83\x01\n" + + "\bFileSync\x12:\n" + + "\bDiffCopy\x12\x14.fsutil.types.Packet\x1a\x14.fsutil.types.Packet(\x010\x01\x12;\n" + + "\tTarStream\x12\x14.fsutil.types.Packet\x1a\x14.fsutil.types.Packet(\x010\x012Z\n" + + "\bFileSend\x12N\n" + + "\bDiffCopy\x12\x1e.moby.filesync.v1.BytesMessage\x1a\x1e.moby.filesync.v1.BytesMessage(\x010\x01B+Z)github.com/moby/buildkit/session/filesyncb\x06proto3" var ( file_github_com_moby_buildkit_session_filesync_filesync_proto_rawDescOnce sync.Once - file_github_com_moby_buildkit_session_filesync_filesync_proto_rawDescData = file_github_com_moby_buildkit_session_filesync_filesync_proto_rawDesc + file_github_com_moby_buildkit_session_filesync_filesync_proto_rawDescData []byte ) func file_github_com_moby_buildkit_session_filesync_filesync_proto_rawDescGZIP() []byte { file_github_com_moby_buildkit_session_filesync_filesync_proto_rawDescOnce.Do(func() { - file_github_com_moby_buildkit_session_filesync_filesync_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_moby_buildkit_session_filesync_filesync_proto_rawDescData) + file_github_com_moby_buildkit_session_filesync_filesync_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_session_filesync_filesync_proto_rawDesc), len(file_github_com_moby_buildkit_session_filesync_filesync_proto_rawDesc))) }) return file_github_com_moby_buildkit_session_filesync_filesync_proto_rawDescData } @@ -140,7 +120,7 @@ func file_github_com_moby_buildkit_session_filesync_filesync_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_moby_buildkit_session_filesync_filesync_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_session_filesync_filesync_proto_rawDesc), len(file_github_com_moby_buildkit_session_filesync_filesync_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -151,7 +131,6 @@ func file_github_com_moby_buildkit_session_filesync_filesync_proto_init() { MessageInfos: file_github_com_moby_buildkit_session_filesync_filesync_proto_msgTypes, }.Build() File_github_com_moby_buildkit_session_filesync_filesync_proto = out.File - file_github_com_moby_buildkit_session_filesync_filesync_proto_rawDesc = nil file_github_com_moby_buildkit_session_filesync_filesync_proto_goTypes = nil file_github_com_moby_buildkit_session_filesync_filesync_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/group.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/group.go index 66d18fcc7..14e02eb7b 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/group.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/group.go @@ -73,7 +73,7 @@ func (sm *Manager) Any(ctx context.Context, g Group, f func(context.Context, str } timeoutCtx, cancel := context.WithCancelCause(ctx) - timeoutCtx, _ = context.WithTimeoutCause(timeoutCtx, 5*time.Second, errors.WithStack(context.DeadlineExceeded)) + timeoutCtx, _ = 
context.WithTimeoutCause(timeoutCtx, 5*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet defer func() { cancel(errors.WithStack(context.Canceled)) }() c, err := sm.Get(timeoutCtx, id, false) if err != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/grpc.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/grpc.go index c207e5dde..d177fd54e 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/grpc.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/grpc.go @@ -94,7 +94,7 @@ func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func(err timeout := time.Duration(math.Max(float64(defaultHealthcheckDuration), float64(lastHealthcheckDuration)*1.5)) ctx, cancel := context.WithCancelCause(ctx) - ctx, _ = context.WithTimeoutCause(ctx, timeout, errors.WithStack(context.DeadlineExceeded)) + ctx, _ = context.WithTimeoutCause(ctx, timeout, errors.WithStack(context.DeadlineExceeded)) //nolint:govet _, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{}) cancel(errors.WithStack(context.Canceled)) diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/grpchijack/dial.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/grpchijack/dial.go index 25e3ddee9..39d78be0d 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/grpchijack/dial.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/grpchijack/dial.go @@ -2,6 +2,7 @@ package grpchijack import ( "context" + "errors" "io" "net" "strings" @@ -32,8 +33,8 @@ func Dialer(api controlapi.ControlClient) session.Dialer { type stream interface { Context() context.Context - SendMsg(m interface{}) error - RecvMsg(m interface{}) error + SendMsg(m any) error + RecvMsg(m any) error } func streamToConn(stream stream) (net.Conn, <-chan struct{}) { @@ -112,7 +113,7 @@ func (c *conn) Close() (err error) { m.Data = c.buf err = c.stream.RecvMsg(m) if err != nil { - if err != io.EOF { + if !errors.Is(err, io.EOF) { c.readMu.Unlock() return } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go index aa89213b7..950517b2e 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
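// Two recurring cleanups appear in the session hunks above: direct comparisons
// with io.EOF become errors.Is(err, io.EOF), which also matches wrapped errors,
// and the discarded cancel func returned by context.WithTimeoutCause gains a
// //nolint:govet annotation, apparently to silence the lostcancel check since
// cancellation is already handled by the surrounding WithCancelCause's deferred
// cancel. A minimal illustration of the errors.Is pattern, not taken from this
// diff:
//
//	if _, err := conn.Read(buf); err != nil && !errors.Is(err, io.EOF) {
//		return err
//	}
//
// The secrets.pb.go hunks below follow the same protoc-gen-go v1.36.6
// regeneration pattern seen earlier.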
// versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.11.4 // source: github.com/moby/buildkit/session/secrets/secrets.proto @@ -11,6 +11,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -21,12 +22,11 @@ const ( ) type GetSecretRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Annotations map[string]string `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields - - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Annotations map[string]string `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + sizeCache protoimpl.SizeCache } func (x *GetSecretRequest) Reset() { @@ -74,11 +74,10 @@ func (x *GetSecretRequest) GetAnnotations() map[string]string { } type GetSecretResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` unknownFields protoimpl.UnknownFields - - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + sizeCache protoimpl.SizeCache } func (x *GetSecretResponse) Reset() { @@ -120,48 +119,28 @@ func (x *GetSecretResponse) GetData() []byte { var File_github_com_moby_buildkit_session_secrets_secrets_proto protoreflect.FileDescriptor -var file_github_com_moby_buildkit_session_secrets_secrets_proto_rawDesc = []byte{ - 0x0a, 0x36, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, - 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2e, - 0x76, 0x31, 0x22, 0xc1, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x5d, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x6d, - 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x73, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, - 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 
0x22, 0x27, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x53, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x32, - 0x6f, 0x0a, 0x07, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x12, 0x64, 0x0a, 0x09, 0x47, 0x65, - 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x12, 0x2a, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x6b, 0x69, 0x74, 0x2e, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x47, - 0x65, 0x74, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x42, 0x2a, 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, - 0x6f, 0x62, 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x65, 0x73, - 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} +const file_github_com_moby_buildkit_session_secrets_secrets_proto_rawDesc = "" + + "\n" + + "6github.com/moby/buildkit/session/secrets/secrets.proto\x12\x18moby.buildkit.secrets.v1\"\xc1\x01\n" + + "\x10GetSecretRequest\x12\x0e\n" + + "\x02ID\x18\x01 \x01(\tR\x02ID\x12]\n" + + "\vannotations\x18\x02 \x03(\v2;.moby.buildkit.secrets.v1.GetSecretRequest.AnnotationsEntryR\vannotations\x1a>\n" + + "\x10AnnotationsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"'\n" + + "\x11GetSecretResponse\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04data2o\n" + + "\aSecrets\x12d\n" + + "\tGetSecret\x12*.moby.buildkit.secrets.v1.GetSecretRequest\x1a+.moby.buildkit.secrets.v1.GetSecretResponseB*Z(github.com/moby/buildkit/session/secretsb\x06proto3" var ( file_github_com_moby_buildkit_session_secrets_secrets_proto_rawDescOnce sync.Once - file_github_com_moby_buildkit_session_secrets_secrets_proto_rawDescData = file_github_com_moby_buildkit_session_secrets_secrets_proto_rawDesc + file_github_com_moby_buildkit_session_secrets_secrets_proto_rawDescData []byte ) func file_github_com_moby_buildkit_session_secrets_secrets_proto_rawDescGZIP() []byte { file_github_com_moby_buildkit_session_secrets_secrets_proto_rawDescOnce.Do(func() { - file_github_com_moby_buildkit_session_secrets_secrets_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_moby_buildkit_session_secrets_secrets_proto_rawDescData) + file_github_com_moby_buildkit_session_secrets_secrets_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_session_secrets_secrets_proto_rawDesc), len(file_github_com_moby_buildkit_session_secrets_secrets_proto_rawDesc))) }) return file_github_com_moby_buildkit_session_secrets_secrets_proto_rawDescData } @@ -192,7 +171,7 @@ func file_github_com_moby_buildkit_session_secrets_secrets_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_moby_buildkit_session_secrets_secrets_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_session_secrets_secrets_proto_rawDesc), 
len(file_github_com_moby_buildkit_session_secrets_secrets_proto_rawDesc)), NumEnums: 0, NumMessages: 3, NumExtensions: 0, @@ -203,7 +182,6 @@ func file_github_com_moby_buildkit_session_secrets_secrets_proto_init() { MessageInfos: file_github_com_moby_buildkit_session_secrets_secrets_proto_msgTypes, }.Build() File_github_com_moby_buildkit_session_secrets_secrets_proto = out.File - file_github_com_moby_buildkit_session_secrets_secrets_proto_rawDesc = nil file_github_com_moby_buildkit_session_secrets_secrets_proto_goTypes = nil file_github_com_moby_buildkit_session_secrets_secrets_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/copy.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/copy.go index 804debd16..506236cd6 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/copy.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/copy.go @@ -9,8 +9,8 @@ import ( ) type Stream interface { - SendMsg(m interface{}) error - RecvMsg(m interface{}) error + SendMsg(m any) error + RecvMsg(m any) error } func Copy(ctx context.Context, conn io.ReadWriteCloser, stream Stream, closeStream func() error) error { @@ -21,7 +21,7 @@ func Copy(ctx context.Context, conn io.ReadWriteCloser, stream Stream, closeStre p := &BytesMessage{} for { if err := stream.RecvMsg(p); err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { // indicates client performed CloseSend, but they may still be // reading data if closeWriter, ok := conn.(interface { @@ -55,7 +55,7 @@ func Copy(ctx context.Context, conn io.ReadWriteCloser, stream Stream, closeStre buf := make([]byte, 32*1024) n, err := conn.Read(buf) switch { - case err == io.EOF: + case errors.Is(err, io.EOF): if closeStream != nil { closeStream() } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/ssh.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/ssh.go index 8a041b311..7cd257bc3 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/ssh.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/ssh.go @@ -40,7 +40,7 @@ func (s *server) run(ctx context.Context, l net.Listener, id string) error { opts := make(map[string][]string) opts[KeySSHID] = []string{id} - ctx = metadata.NewOutgoingContext(ctx, opts) + ctx := metadata.NewOutgoingContext(ctx, opts) stream, err := client.ForwardAgent(ctx) if err != nil { diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/ssh.pb.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/ssh.pb.go index 330645a67..a69637757 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/ssh.pb.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/ssh.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.11.4 // source: github.com/moby/buildkit/session/sshforward/ssh.proto @@ -11,6 +11,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -22,11 +23,10 @@ const ( // BytesMessage contains a chunk of byte data type BytesMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` unknownFields protoimpl.UnknownFields - - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + sizeCache protoimpl.SizeCache } func (x *BytesMessage) Reset() { @@ -67,11 +67,10 @@ func (x *BytesMessage) GetData() []byte { } type CheckAgentRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` unknownFields protoimpl.UnknownFields - - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + sizeCache protoimpl.SizeCache } func (x *CheckAgentRequest) Reset() { @@ -112,9 +111,9 @@ func (x *CheckAgentRequest) GetID() string { } type CheckAgentResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CheckAgentResponse) Reset() { @@ -149,43 +148,27 @@ func (*CheckAgentResponse) Descriptor() ([]byte, []int) { var File_github_com_moby_buildkit_session_sshforward_ssh_proto protoreflect.FileDescriptor -var file_github_com_moby_buildkit_session_sshforward_ssh_proto_rawDesc = []byte{ - 0x0a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, - 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x2f, 0x73, 0x73, 0x68, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x2f, 0x73, 0x73, - 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x73, 0x73, - 0x68, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x2e, 0x76, 0x31, 0x22, 0x22, 0x0a, 0x0c, 0x42, - 0x79, 0x74, 0x65, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, - 0x23, 0x0a, 0x11, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x49, 0x44, 0x22, 0x14, 0x0a, 0x12, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x67, 0x65, - 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xba, 0x01, 0x0a, 0x03, 0x53, - 0x53, 0x48, 0x12, 0x5b, 0x0a, 0x0a, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x67, 0x65, 0x6e, 0x74, - 0x12, 0x25, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x73, 0x73, 0x68, 0x66, 0x6f, 0x72, 0x77, 0x61, - 0x72, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x41, 0x67, 0x65, 0x6e, 0x74, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x26, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x73, - 0x73, 0x68, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x56, 0x0a, 0x0c, 0x46, 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x41, 0x67, 
0x65, 0x6e, 0x74, 0x12, - 0x20, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x73, 0x73, 0x68, 0x66, 0x6f, 0x72, 0x77, 0x61, 0x72, - 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x1a, 0x20, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x73, 0x73, 0x68, 0x66, 0x6f, 0x72, 0x77, - 0x61, 0x72, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2f, 0x73, 0x73, 0x68, 0x66, - 0x6f, 0x72, 0x77, 0x61, 0x72, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_github_com_moby_buildkit_session_sshforward_ssh_proto_rawDesc = "" + + "\n" + + "5github.com/moby/buildkit/session/sshforward/ssh.proto\x12\x12moby.sshforward.v1\"\"\n" + + "\fBytesMessage\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04data\"#\n" + + "\x11CheckAgentRequest\x12\x0e\n" + + "\x02ID\x18\x01 \x01(\tR\x02ID\"\x14\n" + + "\x12CheckAgentResponse2\xba\x01\n" + + "\x03SSH\x12[\n" + + "\n" + + "CheckAgent\x12%.moby.sshforward.v1.CheckAgentRequest\x1a&.moby.sshforward.v1.CheckAgentResponse\x12V\n" + + "\fForwardAgent\x12 .moby.sshforward.v1.BytesMessage\x1a .moby.sshforward.v1.BytesMessage(\x010\x01B-Z+github.com/moby/buildkit/session/sshforwardb\x06proto3" var ( file_github_com_moby_buildkit_session_sshforward_ssh_proto_rawDescOnce sync.Once - file_github_com_moby_buildkit_session_sshforward_ssh_proto_rawDescData = file_github_com_moby_buildkit_session_sshforward_ssh_proto_rawDesc + file_github_com_moby_buildkit_session_sshforward_ssh_proto_rawDescData []byte ) func file_github_com_moby_buildkit_session_sshforward_ssh_proto_rawDescGZIP() []byte { file_github_com_moby_buildkit_session_sshforward_ssh_proto_rawDescOnce.Do(func() { - file_github_com_moby_buildkit_session_sshforward_ssh_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_moby_buildkit_session_sshforward_ssh_proto_rawDescData) + file_github_com_moby_buildkit_session_sshforward_ssh_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_session_sshforward_ssh_proto_rawDesc), len(file_github_com_moby_buildkit_session_sshforward_ssh_proto_rawDesc))) }) return file_github_com_moby_buildkit_session_sshforward_ssh_proto_rawDescData } @@ -217,7 +200,7 @@ func file_github_com_moby_buildkit_session_sshforward_ssh_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_moby_buildkit_session_sshforward_ssh_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_session_sshforward_ssh_proto_rawDesc), len(file_github_com_moby_buildkit_session_sshforward_ssh_proto_rawDesc)), NumEnums: 0, NumMessages: 3, NumExtensions: 0, @@ -228,7 +211,6 @@ func file_github_com_moby_buildkit_session_sshforward_ssh_proto_init() { MessageInfos: file_github_com_moby_buildkit_session_sshforward_ssh_proto_msgTypes, }.Build() File_github_com_moby_buildkit_session_sshforward_ssh_proto = out.File - file_github_com_moby_buildkit_session_sshforward_ssh_proto_rawDesc = nil file_github_com_moby_buildkit_session_sshforward_ssh_proto_goTypes = nil file_github_com_moby_buildkit_session_sshforward_ssh_proto_depIdxs = nil } diff --git 
a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider.go index 4b263143d..8232ce857 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider.go @@ -14,47 +14,60 @@ import ( "github.com/pkg/errors" "golang.org/x/crypto/ssh" "golang.org/x/crypto/ssh/agent" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" ) // AgentConfig is the config for a single exposed SSH agent type AgentConfig struct { ID string Paths []string + Raw bool +} + +func (conf AgentConfig) toDialer() (dialerFn, error) { + if len(conf.Paths) != 1 && conf.Raw { + return nil, errors.New("raw mode must supply exactly one path") + } + + if len(conf.Paths) == 0 || len(conf.Paths) == 1 && conf.Paths[0] == "" { + conf.Paths = []string{os.Getenv("SSH_AUTH_SOCK")} + } + + if conf.Paths[0] == "" { + p, err := getFallbackAgentPath() + if err != nil { + return nil, errors.Wrap(err, "invalid empty ssh agent socket") + } + conf.Paths[0] = p + } + + dialer, err := toDialer(conf.Paths, conf.Raw) + if err != nil { + return nil, errors.Wrapf(err, "failed to convert agent config for ID: %q", conf.ID) + } + + return dialer, nil } // NewSSHAgentProvider creates a session provider that allows access to ssh agent func NewSSHAgentProvider(confs []AgentConfig) (session.Attachable, error) { - m := map[string]source{} + m := make(map[string]dialerFn, len(confs)) for _, conf := range confs { - if len(conf.Paths) == 0 || len(conf.Paths) == 1 && conf.Paths[0] == "" { - conf.Paths = []string{os.Getenv("SSH_AUTH_SOCK")} - } - - if conf.Paths[0] == "" { - p, err := getFallbackAgentPath() - if err != nil { - return nil, errors.Wrap(err, "invalid empty ssh agent socket") - } - conf.Paths[0] = p - } - - src, err := toAgentSource(conf.Paths) - if err != nil { - return nil, err - } if conf.ID == "" { conf.ID = sshforward.DefaultID } if _, ok := m[conf.ID]; ok { - return nil, errors.Errorf("invalid duplicate ID %s", conf.ID) + return nil, errors.Errorf("duplicate agent ID %q", conf.ID) } - m[conf.ID] = src - } - return &socketProvider{m: m}, nil + dialer, err := conf.toDialer() + if err != nil { + return nil, errors.Wrapf(err, "failed to convert agent config %v", conf) + } + m[conf.ID] = dialer + } + return &socketProvider{ + m: m, + }, nil } type source struct { @@ -67,7 +80,35 @@ type socketDialer struct { dialer func(string) (net.Conn, error) } -func (s socketDialer) Dial() (net.Conn, error) { +func (s source) agentDialer(ctx context.Context) (net.Conn, error) { + var a agent.Agent + + var agentConn net.Conn + if s.socket != nil { + conn, err := s.socket.Dial(ctx) + if err != nil { + return nil, errors.Wrapf(err, "failed to connect to %s", s.socket) + } + + agentConn = conn + a = &readOnlyAgent{agent.NewClient(conn)} + } else { + a = s.agent + } + + c1, c2 := net.Pipe() + go func() { + agent.ServeAgent(a, c1) + c1.Close() + if agentConn != nil { + agentConn.Close() + } + }() + + return c2, nil +} + +func (s socketDialer) Dial(ctx context.Context) (net.Conn, error) { return s.dialer(s.path) } @@ -75,76 +116,13 @@ func (s socketDialer) String() string { return s.path } -type socketProvider struct { - m map[string]source -} - -func (sp *socketProvider) Register(server *grpc.Server) { - sshforward.RegisterSSHServer(server, sp) -} - 
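// The sshprovider rewrite above splits agent handling into per-ID dialer
// functions: AgentConfig gains a Raw flag that forwards a single agent socket
// verbatim instead of wrapping it in a read-only in-process agent, and the
// gRPC-facing socketProvider moves to raw_provider.go (added later in this
// diff). A construction sketch using only the types visible here; the second
// socket path is a placeholder:
//
//	confs := []sshprovider.AgentConfig{
//		{ID: "default", Paths: []string{os.Getenv("SSH_AUTH_SOCK")}},
//		{ID: "deploy-key", Paths: []string{"/run/user/1000/keyring/ssh"}, Raw: true},
//	}
//	attachable, err := sshprovider.NewSSHAgentProvider(confs)
//	if err != nil {
//		// duplicate IDs, unreadable key files, or Raw with more than one path
//		// are rejected here
//	}
//	_ = attachable // attach to a buildkit session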
-func (sp *socketProvider) CheckAgent(ctx context.Context, req *sshforward.CheckAgentRequest) (*sshforward.CheckAgentResponse, error) { - id := sshforward.DefaultID - if req.ID != "" { - id = req.ID - } - if _, ok := sp.m[id]; !ok { - return &sshforward.CheckAgentResponse{}, errors.Errorf("unset ssh forward key %s", id) - } - return &sshforward.CheckAgentResponse{}, nil -} - -func (sp *socketProvider) ForwardAgent(stream sshforward.SSH_ForwardAgentServer) error { - id := sshforward.DefaultID - - opts, _ := metadata.FromIncomingContext(stream.Context()) // if no metadata continue with empty object - - if v, ok := opts[sshforward.KeySSHID]; ok && len(v) > 0 && v[0] != "" { - id = v[0] - } - - src, ok := sp.m[id] - if !ok { - return errors.Errorf("unset ssh forward key %s", id) - } - - var a agent.Agent - - if src.socket != nil { - conn, err := src.socket.Dial() - if err != nil { - return errors.Wrapf(err, "failed to connect to %s", src.socket) - } - - a = &readOnlyAgent{agent.NewClient(conn)} - defer conn.Close() - } else { - a = src.agent - } - - s1, s2 := sockPair() - - eg, ctx := errgroup.WithContext(context.TODO()) - - eg.Go(func() error { - return agent.ServeAgent(a, s1) - }) - - eg.Go(func() error { - defer s1.Close() - return sshforward.Copy(ctx, s2, stream, nil) - }) - - return eg.Wait() -} - -func toAgentSource(paths []string) (source, error) { +func toDialer(paths []string, raw bool) (func(context.Context) (net.Conn, error), error) { var keys bool var socket *socketDialer a := agent.NewKeyring() for _, p := range paths { if socket != nil { - return source{}, errors.New("only single socket allowed") + return nil, errors.New("only single socket allowed") } if parsed := getWindowsPipeDialer(p); parsed != nil { @@ -154,21 +132,24 @@ func toAgentSource(paths []string) (source, error) { fi, err := os.Stat(p) if err != nil { - return source{}, errors.WithStack(err) + return nil, errors.WithStack(err) } if fi.Mode()&os.ModeSocket > 0 { socket = &socketDialer{path: p, dialer: unixSocketDialer} continue } + if raw { + return nil, errors.Errorf("raw mode only supported with socket paths") + } f, err := os.Open(p) if err != nil { - return source{}, errors.Wrapf(err, "failed to open %s", p) + return nil, errors.Wrapf(err, "failed to open %s", p) } dt, err := io.ReadAll(&io.LimitedReader{R: f, N: 100 * 1024}) _ = f.Close() if err != nil { - return source{}, errors.Wrapf(err, "failed to read %s", p) + return nil, errors.Wrapf(err, "failed to read %s", p) } k, err := ssh.ParseRawPrivateKey(dt) @@ -178,16 +159,16 @@ func toAgentSource(paths []string) (source, error) { // If parsing the file fails, check to see if it kind of looks like socket-shaped. if runtime.GOOS == "windows" && strings.Contains(string(dt), "socket") { if keys { - return source{}, errors.Errorf("invalid combination of keys and sockets") + return nil, errors.Errorf("invalid combination of keys and sockets") } socket = &socketDialer{path: p, dialer: unixSocketDialer} continue } - return source{}, errors.Wrapf(err, "failed to parse %s", p) // TODO: prompt passphrase? + return nil, errors.Wrapf(err, "failed to parse %s", p) // TODO: prompt passphrase? 
} if err := a.Add(agent.AddedKey{PrivateKey: k}); err != nil { - return source{}, errors.Wrapf(err, "failed to add %s to agent", p) + return nil, errors.Wrapf(err, "failed to add %s to agent", p) } keys = true @@ -195,30 +176,27 @@ func toAgentSource(paths []string) (source, error) { if socket != nil { if keys { - return source{}, errors.Errorf("invalid combination of keys and sockets") + return nil, errors.Errorf("invalid combination of keys and sockets") } - return source{socket: socket}, nil + if raw { + return func(ctx context.Context) (net.Conn, error) { + return socket.Dial(ctx) + }, nil + } + return source{socket: socket}.agentDialer, nil } - return source{agent: a}, nil + if raw { + return nil, errors.New("raw mode must supply exactly one socket path") + } + + return source{agent: a}.agentDialer, nil } func unixSocketDialer(path string) (net.Conn, error) { return net.DialTimeout("unix", path, 2*time.Second) } -func sockPair() (io.ReadWriteCloser, io.ReadWriteCloser) { - pr1, pw1 := io.Pipe() - pr2, pw2 := io.Pipe() - return &sock{pr1, pw2, pw1}, &sock{pr2, pw1, pw2} -} - -type sock struct { - io.Reader - io.Writer - io.Closer -} - type readOnlyAgent struct { agent.ExtendedAgent } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/raw_provider.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/raw_provider.go new file mode 100644 index 000000000..8fed40b0c --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/raw_provider.go @@ -0,0 +1,58 @@ +package sshprovider + +import ( + "context" + "net" + + "github.com/moby/buildkit/session/sshforward" + "github.com/pkg/errors" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +type dialerFn func(ctx context.Context) (net.Conn, error) + +type socketProvider struct { + m map[string]dialerFn +} + +func (p *socketProvider) CheckAgent(ctx context.Context, req *sshforward.CheckAgentRequest) (*sshforward.CheckAgentResponse, error) { + id := sshforward.DefaultID + if req.ID != "" { + id = req.ID + } + + _, ok := p.m[id] + if !ok { + return nil, errors.Errorf("unset ssh forward key %s", id) + } + return &sshforward.CheckAgentResponse{}, nil +} + +func (p *socketProvider) ForwardAgent(stream sshforward.SSH_ForwardAgentServer) error { + id := sshforward.DefaultID + + ctx := stream.Context() + opts, _ := metadata.FromIncomingContext(ctx) + + if v, ok := opts[sshforward.KeySSHID]; ok && len(v) > 0 && v[0] != "" { + id = v[0] + } + + dialer, ok := p.m[id] + if !ok { + return errors.Errorf("unset ssh forward key %s", id) + } + + conn, err := dialer(ctx) + if err != nil { + return errors.Wrapf(err, "failed to dial agent %s", id) + } + defer conn.Close() + + return sshforward.Copy(ctx, conn, stream, nil) +} + +func (p *socketProvider) Register(srv *grpc.Server) { + sshforward.RegisterSSHServer(srv, p) +} diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/upload/upload.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/upload/upload.go index e3fce5c1c..30e44eb38 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/upload/upload.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/upload/upload.go @@ -42,7 +42,7 @@ func (u *Upload) WriteTo(w io.Writer) (int64, error) { for { var bm BytesMessage if err := u.cc.RecvMsg(&bm); err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { return n, nil } return n, errors.WithStack(err) diff --git 
a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/upload/upload.pb.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/upload/upload.pb.go index 3a9d8bb77..f1d0ba1ec 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/upload/upload.pb.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/upload/upload.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.11.4 // source: github.com/moby/buildkit/session/upload/upload.proto @@ -11,6 +11,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -22,11 +23,10 @@ const ( // BytesMessage contains a chunk of byte data type BytesMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` unknownFields protoimpl.UnknownFields - - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + sizeCache protoimpl.SizeCache } func (x *BytesMessage) Reset() { @@ -68,32 +68,22 @@ func (x *BytesMessage) GetData() []byte { var File_github_com_moby_buildkit_session_upload_upload_proto protoreflect.FileDescriptor -var file_github_com_moby_buildkit_session_upload_upload_proto_rawDesc = []byte{ - 0x0a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, - 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, - 0x6f, 0x6e, 0x2f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x2f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x75, 0x70, 0x6c, - 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x22, 0x22, 0x0a, 0x0c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x32, 0x50, 0x0a, 0x06, 0x55, 0x70, - 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x46, 0x0a, 0x04, 0x50, 0x75, 0x6c, 0x6c, 0x12, 0x1c, 0x2e, 0x6d, - 0x6f, 0x62, 0x79, 0x2e, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x1c, 0x2e, 0x6d, 0x6f, 0x62, - 0x79, 0x2e, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x79, 0x74, 0x65, - 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x28, 0x01, 0x30, 0x01, 0x42, 0x29, 0x5a, 0x27, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, 0x2f, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x2f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_github_com_moby_buildkit_session_upload_upload_proto_rawDesc = "" + + "\n" + + "4github.com/moby/buildkit/session/upload/upload.proto\x12\x0emoby.upload.v1\"\"\n" + + "\fBytesMessage\x12\x12\n" + + "\x04data\x18\x01 \x01(\fR\x04data2P\n" + + "\x06Upload\x12F\n" + + "\x04Pull\x12\x1c.moby.upload.v1.BytesMessage\x1a\x1c.moby.upload.v1.BytesMessage(\x010\x01B)Z'github.com/moby/buildkit/session/uploadb\x06proto3" var ( file_github_com_moby_buildkit_session_upload_upload_proto_rawDescOnce sync.Once - file_github_com_moby_buildkit_session_upload_upload_proto_rawDescData = file_github_com_moby_buildkit_session_upload_upload_proto_rawDesc + 
file_github_com_moby_buildkit_session_upload_upload_proto_rawDescData []byte ) func file_github_com_moby_buildkit_session_upload_upload_proto_rawDescGZIP() []byte { file_github_com_moby_buildkit_session_upload_upload_proto_rawDescOnce.Do(func() { - file_github_com_moby_buildkit_session_upload_upload_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_moby_buildkit_session_upload_upload_proto_rawDescData) + file_github_com_moby_buildkit_session_upload_upload_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_session_upload_upload_proto_rawDesc), len(file_github_com_moby_buildkit_session_upload_upload_proto_rawDesc))) }) return file_github_com_moby_buildkit_session_upload_upload_proto_rawDescData } @@ -121,7 +111,7 @@ func file_github_com_moby_buildkit_session_upload_upload_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_moby_buildkit_session_upload_upload_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_session_upload_upload_proto_rawDesc), len(file_github_com_moby_buildkit_session_upload_upload_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -132,7 +122,6 @@ func file_github_com_moby_buildkit_session_upload_upload_proto_init() { MessageInfos: file_github_com_moby_buildkit_session_upload_upload_proto_msgTypes, }.Build() File_github_com_moby_buildkit_session_upload_upload_proto = out.File - file_github_com_moby_buildkit_session_upload_upload_proto_rawDesc = nil file_github_com_moby_buildkit_session_upload_upload_proto_goTypes = nil file_github_com_moby_buildkit_session_upload_upload_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/upload/uploadprovider/provider.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/upload/uploadprovider/provider.go index 2af434d80..96815302b 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/upload/uploadprovider/provider.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/session/upload/uploadprovider/provider.go @@ -63,23 +63,20 @@ type writer struct { grpc.ServerStream } -func (w *writer) Write(dt []byte) (int, error) { - // avoid sending too big messages on grpc stream +func (w *writer) Write(dt []byte) (n int, err error) { const maxChunkSize = 3 * 1024 * 1024 - if len(dt) > maxChunkSize { - n1, err := w.Write(dt[:maxChunkSize]) - if err != nil { - return n1, err + for len(dt) > 0 { + data := dt + if len(data) > maxChunkSize { + data = data[:maxChunkSize] } - dt = dt[maxChunkSize:] - var n2 int - if n2, err := w.Write(dt); err != nil { - return n1 + n2, err + + msg := &upload.BytesMessage{Data: data} + if err := w.SendMsg(msg); err != nil { + return n, err } - return n1 + n2, nil + n += len(data) + dt = dt[len(data):] } - if err := w.SendMsg(&upload.BytesMessage{Data: dt}); err != nil { - return 0, err - } - return len(dt), nil + return n, nil } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.pb.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.pb.go index e94043395..d19593d75 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.pb.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.11.4 // source: github.com/moby/buildkit/solver/errdefs/errdefs.proto @@ -12,6 +12,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -22,11 +23,10 @@ const ( ) type Vertex struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` unknownFields protoimpl.UnknownFields - - Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Vertex) Reset() { @@ -67,12 +67,11 @@ func (x *Vertex) GetDigest() string { } type Source struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Info *pb.SourceInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` + Ranges []*pb.Range `protobuf:"bytes,2,rep,name=ranges,proto3" json:"ranges,omitempty"` unknownFields protoimpl.UnknownFields - - Info *pb.SourceInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` - Ranges []*pb.Range `protobuf:"bytes,2,rep,name=ranges,proto3" json:"ranges,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Source) Reset() { @@ -119,17 +118,68 @@ func (x *Source) GetRanges() []*pb.Range { return nil } -type FrontendCap struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache +type Frontend struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // frontend name e.g. dockerfile.v0 or gateway.v0 + Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` // used by the gateway frontend to identify the source, which corresponds to the image name unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +func (x *Frontend) Reset() { + *x = Frontend{} + mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Frontend) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Frontend) ProtoMessage() {} + +func (x *Frontend) ProtoReflect() protoreflect.Message { + mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Frontend.ProtoReflect.Descriptor instead. 
+func (*Frontend) Descriptor() ([]byte, []int) { + return file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescGZIP(), []int{2} +} + +func (x *Frontend) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Frontend) GetSource() string { + if x != nil { + return x.Source + } + return "" +} + +type FrontendCap struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FrontendCap) Reset() { *x = FrontendCap{} - mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[2] + mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -141,7 +191,7 @@ func (x *FrontendCap) String() string { func (*FrontendCap) ProtoMessage() {} func (x *FrontendCap) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[2] + mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -154,7 +204,7 @@ func (x *FrontendCap) ProtoReflect() protoreflect.Message { // Deprecated: Use FrontendCap.ProtoReflect.Descriptor instead. func (*FrontendCap) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescGZIP(), []int{2} + return file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescGZIP(), []int{3} } func (x *FrontendCap) GetName() string { @@ -165,16 +215,15 @@ func (x *FrontendCap) GetName() string { } type Subrequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Subrequest) Reset() { *x = Subrequest{} - mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[3] + mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -186,7 +235,7 @@ func (x *Subrequest) String() string { func (*Subrequest) ProtoMessage() {} func (x *Subrequest) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[3] + mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -199,7 +248,7 @@ func (x *Subrequest) ProtoReflect() protoreflect.Message { // Deprecated: Use Subrequest.ProtoReflect.Descriptor instead. 
func (*Subrequest) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescGZIP(), []int{3} + return file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescGZIP(), []int{4} } func (x *Subrequest) GetName() string { @@ -210,24 +259,23 @@ func (x *Subrequest) GetName() string { } type Solve struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - InputIDs []string `protobuf:"bytes,1,rep,name=inputIDs,proto3" json:"inputIDs,omitempty"` - MountIDs []string `protobuf:"bytes,2,rep,name=mountIDs,proto3" json:"mountIDs,omitempty"` - Op *pb.Op `protobuf:"bytes,3,opt,name=op,proto3" json:"op,omitempty"` - // Types that are assignable to Subject: + state protoimpl.MessageState `protogen:"open.v1"` + InputIDs []string `protobuf:"bytes,1,rep,name=inputIDs,proto3" json:"inputIDs,omitempty"` + MountIDs []string `protobuf:"bytes,2,rep,name=mountIDs,proto3" json:"mountIDs,omitempty"` + Op *pb.Op `protobuf:"bytes,3,opt,name=op,proto3" json:"op,omitempty"` + // Types that are valid to be assigned to Subject: // // *Solve_File // *Solve_Cache - Subject isSolve_Subject `protobuf_oneof:"subject"` - Description map[string]string `protobuf:"bytes,6,rep,name=description,proto3" json:"description,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Subject isSolve_Subject `protobuf_oneof:"subject"` + Description map[string]string `protobuf:"bytes,6,rep,name=description,proto3" json:"description,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Solve) Reset() { *x = Solve{} - mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[4] + mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -239,7 +287,7 @@ func (x *Solve) String() string { func (*Solve) ProtoMessage() {} func (x *Solve) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[4] + mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -252,7 +300,7 @@ func (x *Solve) ProtoReflect() protoreflect.Message { // Deprecated: Use Solve.ProtoReflect.Descriptor instead. 
func (*Solve) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescGZIP(), []int{4} + return file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescGZIP(), []int{5} } func (x *Solve) GetInputIDs() []string { @@ -276,23 +324,27 @@ func (x *Solve) GetOp() *pb.Op { return nil } -func (m *Solve) GetSubject() isSolve_Subject { - if m != nil { - return m.Subject +func (x *Solve) GetSubject() isSolve_Subject { + if x != nil { + return x.Subject } return nil } func (x *Solve) GetFile() *FileAction { - if x, ok := x.GetSubject().(*Solve_File); ok { - return x.File + if x != nil { + if x, ok := x.Subject.(*Solve_File); ok { + return x.File + } } return nil } func (x *Solve) GetCache() *ContentCache { - if x, ok := x.GetSubject().(*Solve_Cache); ok { - return x.Cache + if x != nil { + if x, ok := x.Subject.(*Solve_Cache); ok { + return x.Cache + } } return nil } @@ -321,17 +373,16 @@ func (*Solve_File) isSolve_Subject() {} func (*Solve_Cache) isSolve_Subject() {} type FileAction struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Index of the file action that failed the exec. - Index int64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + Index int64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileAction) Reset() { *x = FileAction{} - mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[5] + mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -343,7 +394,7 @@ func (x *FileAction) String() string { func (*FileAction) ProtoMessage() {} func (x *FileAction) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[5] + mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -356,7 +407,7 @@ func (x *FileAction) ProtoReflect() protoreflect.Message { // Deprecated: Use FileAction.ProtoReflect.Descriptor instead. func (*FileAction) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescGZIP(), []int{5} + return file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescGZIP(), []int{6} } func (x *FileAction) GetIndex() int64 { @@ -367,17 +418,16 @@ func (x *FileAction) GetIndex() int64 { } type ContentCache struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Original index of result that failed the slow cache calculation. 
- Index int64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + Index int64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ContentCache) Reset() { *x = ContentCache{} - mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[6] + mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -389,7 +439,7 @@ func (x *ContentCache) String() string { func (*ContentCache) ProtoMessage() {} func (x *ContentCache) ProtoReflect() protoreflect.Message { - mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[6] + mi := &file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -402,7 +452,7 @@ func (x *ContentCache) ProtoReflect() protoreflect.Message { // Deprecated: Use ContentCache.ProtoReflect.Descriptor instead. func (*ContentCache) Descriptor() ([]byte, []int) { - return file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescGZIP(), []int{6} + return file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescGZIP(), []int{7} } func (x *ContentCache) GetIndex() int64 { @@ -414,89 +464,73 @@ func (x *ContentCache) GetIndex() int64 { var File_github_com_moby_buildkit_solver_errdefs_errdefs_proto protoreflect.FileDescriptor -var file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDesc = []byte{ - 0x0a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, - 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x6f, 0x6c, 0x76, 0x65, - 0x72, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x66, 0x73, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x66, - 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x65, 0x72, 0x72, 0x64, 0x65, 0x66, 0x73, - 0x1a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, - 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x6f, 0x6c, 0x76, 0x65, - 0x72, 0x2f, 0x70, 0x62, 0x2f, 0x6f, 0x70, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x20, - 0x0a, 0x06, 0x56, 0x65, 0x72, 0x74, 0x65, 0x78, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, - 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, - 0x22, 0x4f, 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x04, 0x69, 0x6e, - 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x21, - 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, - 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x73, 0x22, 0x21, 0x0a, 0x0b, 0x46, 0x72, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x64, 0x43, 0x61, 0x70, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x20, 0x0a, 0x0a, 0x53, 0x75, 0x62, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xbf, 0x02, 0x0a, 0x05, 0x53, 0x6f, 0x6c, 0x76, 0x65, - 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x49, 0x44, 0x73, 
0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x08, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x49, 0x44, 0x73, 0x12, 0x1a, 0x0a, 0x08, - 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x44, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, - 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x44, 0x73, 0x12, 0x16, 0x0a, 0x02, 0x6f, 0x70, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x06, 0x2e, 0x70, 0x62, 0x2e, 0x4f, 0x70, 0x52, 0x02, 0x6f, 0x70, - 0x12, 0x29, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x65, 0x72, 0x72, 0x64, 0x65, 0x66, 0x73, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x63, - 0x61, 0x63, 0x68, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x65, 0x72, 0x72, - 0x64, 0x65, 0x66, 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x63, 0x68, - 0x65, 0x48, 0x00, 0x52, 0x05, 0x63, 0x61, 0x63, 0x68, 0x65, 0x12, 0x41, 0x0a, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1f, 0x2e, 0x65, 0x72, 0x72, 0x64, 0x65, 0x66, 0x73, 0x2e, 0x53, 0x6f, 0x6c, 0x76, 0x65, 0x2e, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x3e, 0x0a, - 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x09, 0x0a, - 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x69, 0x6c, 0x65, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0x24, 0x0a, 0x0c, - 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x14, 0x0a, 0x05, - 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x64, - 0x65, 0x78, 0x42, 0x29, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x6d, 0x6f, 0x62, 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, - 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x66, 0x73, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDesc = "" + + "\n" + + "5github.com/moby/buildkit/solver/errdefs/errdefs.proto\x12\aerrdefs\x1a,github.com/moby/buildkit/solver/pb/ops.proto\" \n" + + "\x06Vertex\x12\x16\n" + + "\x06digest\x18\x01 \x01(\tR\x06digest\"O\n" + + "\x06Source\x12\"\n" + + "\x04info\x18\x01 \x01(\v2\x0e.pb.SourceInfoR\x04info\x12!\n" + + "\x06ranges\x18\x02 \x03(\v2\t.pb.RangeR\x06ranges\"6\n" + + "\bFrontend\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" + + "\x06source\x18\x02 \x01(\tR\x06source\"!\n" + + "\vFrontendCap\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\" \n" + + "\n" + + "Subrequest\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\"\xbf\x02\n" + + "\x05Solve\x12\x1a\n" + + "\binputIDs\x18\x01 \x03(\tR\binputIDs\x12\x1a\n" + + "\bmountIDs\x18\x02 \x03(\tR\bmountIDs\x12\x16\n" + + "\x02op\x18\x03 \x01(\v2\x06.pb.OpR\x02op\x12)\n" + + "\x04file\x18\x04 
\x01(\v2\x13.errdefs.FileActionH\x00R\x04file\x12-\n" + + "\x05cache\x18\x05 \x01(\v2\x15.errdefs.ContentCacheH\x00R\x05cache\x12A\n" + + "\vdescription\x18\x06 \x03(\v2\x1f.errdefs.Solve.DescriptionEntryR\vdescription\x1a>\n" + + "\x10DescriptionEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01B\t\n" + + "\asubject\"\"\n" + + "\n" + + "FileAction\x12\x14\n" + + "\x05index\x18\x01 \x01(\x03R\x05index\"$\n" + + "\fContentCache\x12\x14\n" + + "\x05index\x18\x01 \x01(\x03R\x05indexB)Z'github.com/moby/buildkit/solver/errdefsb\x06proto3" var ( file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescOnce sync.Once - file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescData = file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDesc + file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescData []byte ) func file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescGZIP() []byte { file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescOnce.Do(func() { - file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescData) + file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDesc), len(file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDesc))) }) return file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDescData } -var file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes = make([]protoimpl.MessageInfo, 8) +var file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes = make([]protoimpl.MessageInfo, 9) var file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_goTypes = []any{ (*Vertex)(nil), // 0: errdefs.Vertex (*Source)(nil), // 1: errdefs.Source - (*FrontendCap)(nil), // 2: errdefs.FrontendCap - (*Subrequest)(nil), // 3: errdefs.Subrequest - (*Solve)(nil), // 4: errdefs.Solve - (*FileAction)(nil), // 5: errdefs.FileAction - (*ContentCache)(nil), // 6: errdefs.ContentCache - nil, // 7: errdefs.Solve.DescriptionEntry - (*pb.SourceInfo)(nil), // 8: pb.SourceInfo - (*pb.Range)(nil), // 9: pb.Range - (*pb.Op)(nil), // 10: pb.Op + (*Frontend)(nil), // 2: errdefs.Frontend + (*FrontendCap)(nil), // 3: errdefs.FrontendCap + (*Subrequest)(nil), // 4: errdefs.Subrequest + (*Solve)(nil), // 5: errdefs.Solve + (*FileAction)(nil), // 6: errdefs.FileAction + (*ContentCache)(nil), // 7: errdefs.ContentCache + nil, // 8: errdefs.Solve.DescriptionEntry + (*pb.SourceInfo)(nil), // 9: pb.SourceInfo + (*pb.Range)(nil), // 10: pb.Range + (*pb.Op)(nil), // 11: pb.Op } var file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_depIdxs = []int32{ - 8, // 0: errdefs.Source.info:type_name -> pb.SourceInfo - 9, // 1: errdefs.Source.ranges:type_name -> pb.Range - 10, // 2: errdefs.Solve.op:type_name -> pb.Op - 5, // 3: errdefs.Solve.file:type_name -> errdefs.FileAction - 6, // 4: errdefs.Solve.cache:type_name -> errdefs.ContentCache - 7, // 5: errdefs.Solve.description:type_name -> errdefs.Solve.DescriptionEntry + 9, // 0: errdefs.Source.info:type_name -> pb.SourceInfo + 10, // 1: errdefs.Source.ranges:type_name -> pb.Range + 11, // 2: errdefs.Solve.op:type_name -> pb.Op + 6, // 3: errdefs.Solve.file:type_name -> errdefs.FileAction + 7, // 4: errdefs.Solve.cache:type_name -> errdefs.ContentCache + 8, // 5: 
errdefs.Solve.description:type_name -> errdefs.Solve.DescriptionEntry 6, // [6:6] is the sub-list for method output_type 6, // [6:6] is the sub-list for method input_type 6, // [6:6] is the sub-list for extension type_name @@ -509,7 +543,7 @@ func file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_init() { if File_github_com_moby_buildkit_solver_errdefs_errdefs_proto != nil { return } - file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[4].OneofWrappers = []any{ + file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes[5].OneofWrappers = []any{ (*Solve_File)(nil), (*Solve_Cache)(nil), } @@ -517,9 +551,9 @@ func file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDesc), len(file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDesc)), NumEnums: 0, - NumMessages: 8, + NumMessages: 9, NumExtensions: 0, NumServices: 0, }, @@ -528,7 +562,6 @@ func file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_init() { MessageInfos: file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_msgTypes, }.Build() File_github_com_moby_buildkit_solver_errdefs_errdefs_proto = out.File - file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_rawDesc = nil file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_goTypes = nil file_github_com_moby_buildkit_solver_errdefs_errdefs_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.proto b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.proto index 97489b930..df0dd8c95 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.proto +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/errdefs.proto @@ -15,6 +15,11 @@ message Source { repeated pb.Range ranges = 2; } +message Frontend { + string name = 1; // frontend name e.g. 
dockerfile.v0 or gateway.v0 + string source = 2; // used by the gateway frontend to identify the source, which corresponds to the image name +} + message FrontendCap { string name = 1; } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/errdefs_vtproto.pb.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/errdefs_vtproto.pb.go index f94174ed3..597ca4a89 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/errdefs_vtproto.pb.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/errdefs_vtproto.pb.go @@ -61,6 +61,24 @@ func (m *Source) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *Frontend) CloneVT() *Frontend { + if m == nil { + return (*Frontend)(nil) + } + r := new(Frontend) + r.Name = m.Name + r.Source = m.Source + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + +func (m *Frontend) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *FrontendCap) CloneVT() *FrontendCap { if m == nil { return (*FrontendCap)(nil) @@ -239,6 +257,28 @@ func (this *Source) EqualMessageVT(thatMsg proto.Message) bool { } return this.EqualVT(that) } +func (this *Frontend) EqualVT(that *Frontend) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Name != that.Name { + return false + } + if this.Source != that.Source { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *Frontend) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*Frontend) + if !ok { + return false + } + return this.EqualVT(that) +} func (this *FrontendCap) EqualVT(that *FrontendCap) bool { if this == that { return true @@ -519,6 +559,53 @@ func (m *Source) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Frontend) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Frontend) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Frontend) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Source) > 0 { + i -= len(m.Source) + copy(dAtA[i:], m.Source) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Source))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *FrontendCap) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -844,6 +931,24 @@ func (m *Source) SizeVT() (n int) { return n } +func (m *Frontend) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Source) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + n += len(m.unknownFields) + return n +} + func (m *FrontendCap) SizeVT() (n int) { if m == nil { return 0 @@ -1167,6 +1272,121 @@ func (m *Source) UnmarshalVT(dAtA []byte) error 
{ } return nil } +func (m *Frontend) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Frontend: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Frontend: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *FrontendCap) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/frontend.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/frontend.go new file mode 100644 index 000000000..70137878e --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/frontend.go @@ -0,0 +1,50 @@ +package errdefs + +import ( + "errors" + "fmt" + + "github.com/containerd/typeurl/v2" + "github.com/moby/buildkit/util/grpcerrors" +) + +func init() { + typeurl.Register((*Frontend)(nil), "github.com/moby/buildkit", "errdefs.Frontend+json") +} + +type FrontendError struct { + *Frontend + error +} + +func (e *FrontendError) Error() string { + // These can be nested, so avoid adding any details to the error message + // if we already have an error. Otherwise the resulting error message + // can be very long and not very useful. 
+ if e.error != nil { + return e.error.Error() + } + return fmt.Sprintf("frontend %s failed", e.Name) +} + +func (e *FrontendError) Unwrap() error { + return e.error +} + +func (e *FrontendError) ToProto() grpcerrors.TypedErrorProto { + return e.Frontend +} + +func (v *Frontend) WrapError(err error) error { + return &FrontendError{error: err, Frontend: v} +} + +func Frontends(err error) []*Frontend { + var out []*Frontend + var es *FrontendError + if errors.As(err, &es) { + out = Frontends(es.Unwrap()) + out = append(out, es.CloneVT()) + } + return out +} diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/fronetendcap.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/frontendcap.go similarity index 91% rename from src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/fronetendcap.go rename to src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/frontendcap.go index 72ee63682..d7acf0521 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/fronetendcap.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/frontendcap.go @@ -17,7 +17,7 @@ type UnsupportedFrontendCapError struct { } func (e *UnsupportedFrontendCapError) Error() string { - msg := fmt.Sprintf("unsupported frontend capability %s", e.FrontendCap.Name) + msg := fmt.Sprintf("unsupported frontend capability %s", e.Name) if e.error != nil { msg += ": " + e.error.Error() } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/op.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/op.go index 6fe44976e..519636b08 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/op.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/op.go @@ -12,7 +12,7 @@ func (e *OpError) Unwrap() error { return e.error } -func WithOp(err error, anyOp interface{}, opDesc map[string]string) error { +func WithOp(err error, anyOp any, opDesc map[string]string) error { op, ok := anyOp.(*pb.Op) if err == nil || !ok { return err diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/solve.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/solve.go index 3b457550e..691983caf 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/solve.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/solve.go @@ -14,7 +14,7 @@ func init() { typeurl.Register((*Solve)(nil), "github.com/moby/buildkit", "errdefs.Solve+json") } -//nolint:revive +//nolint:revive,staticcheck type IsSolve_Subject isSolve_Subject // SolveError will be returned when an error is encountered during a solve that diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/source.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/source.go index 06e3cd86a..290ed06db 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/source.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/source.go @@ -14,34 +14,34 @@ func WithSource(err error, src *Source) error { if err == nil { return nil } - return &ErrorSource{Source: src, error: err} + return &SourceError{Source: src, error: err} } -type ErrorSource struct { +type SourceError struct { *Source error } -func (e *ErrorSource) Unwrap() error { +func (e *SourceError) Unwrap() error { return e.error } -func (e *ErrorSource) ToProto() grpcerrors.TypedErrorProto { +func (e *SourceError) ToProto() grpcerrors.TypedErrorProto { return 
e.Source } func Sources(err error) []*Source { var out []*Source - var es *ErrorSource + var es *SourceError if errors.As(err, &es) { out = Sources(es.Unwrap()) - out = append(out, es.Source.CloneVT()) + out = append(out, es.CloneVT()) } return out } func (s *Source) WrapError(err error) error { - return &ErrorSource{error: err, Source: s} + return &SourceError{error: err, Source: s} } func (s *Source) Print(w io.Writer) error { @@ -69,10 +69,7 @@ func (s *Source) Print(w io.Writer) error { var p int prepadStart := start - for { - if p >= pad { - break - } + for p < pad { if start > 1 { start-- p++ @@ -98,10 +95,7 @@ func (s *Source) Print(w io.Writer) error { func containsLine(rr []*pb.Range, l int) bool { for _, r := range rr { - e := r.End.Line - if e < r.Start.Line { - e = r.Start.Line - } + e := max(r.End.Line, r.Start.Line) if r.Start.Line <= int32(l) && e >= int32(l) { return true } @@ -112,10 +106,7 @@ func containsLine(rr []*pb.Range, l int) bool { func getStartEndLine(rr []*pb.Range) (start int, end int, ok bool) { first := true for _, r := range rr { - e := r.End.Line - if e < r.Start.Line { - e = r.Start.Line - } + e := max(r.End.Line, r.Start.Line) if first || int(r.Start.Line) < start { start = int(r.Start.Line) } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/subrequest.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/subrequest.go index 0fba51d3c..254aebe77 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/subrequest.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/errdefs/subrequest.go @@ -17,7 +17,7 @@ type UnsupportedSubrequestError struct { } func (e *UnsupportedSubrequestError) Error() string { - msg := fmt.Sprintf("unsupported request %s", e.Subrequest.Name) + msg := fmt.Sprintf("unsupported request %s", e.Name) if e.error != nil { msg += ": " + e.error.Error() } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/types/types.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/types/types.go new file mode 100644 index 000000000..869ad0b5b --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/llbsolver/provenance/types/types.go @@ -0,0 +1,261 @@ +package types + +import ( + "slices" + + slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common" + slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" + slsa1 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1" + resourcestypes "github.com/moby/buildkit/executor/resources/types" + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +const ( + BuildKitBuildType = "https://mobyproject.org/buildkit@v1" +) + +type BuildConfig struct { + Definition []BuildStep `json:"llbDefinition,omitempty"` + DigestMapping map[digest.Digest]string `json:"digestMapping,omitempty"` +} + +type BuildStep struct { + ID string `json:"id,omitempty"` + Op *pb.Op `json:"op,omitempty"` + Inputs []string `json:"inputs,omitempty"` + ResourceUsage *resourcestypes.Samples `json:"resourceUsage,omitempty"` +} + +type Source struct { + Locations map[string]*pb.Locations `json:"locations,omitempty"` + Infos []SourceInfo `json:"infos,omitempty"` +} + +type SourceInfo struct { + Filename string `json:"filename,omitempty"` + Language string `json:"language,omitempty"` + Data []byte `json:"data,omitempty"` + Definition 
[]BuildStep `json:"llbDefinition,omitempty"` + DigestMapping map[digest.Digest]string `json:"digestMapping,omitempty"` +} + +type ImageSource struct { + Ref string + Platform *ocispecs.Platform + Digest digest.Digest + Local bool +} + +type GitSource struct { + URL string + Commit string +} + +type HTTPSource struct { + URL string + Digest digest.Digest +} + +type LocalSource struct { + Name string `json:"name"` +} + +type Secret struct { + ID string `json:"id"` + Optional bool `json:"optional,omitempty"` +} + +type SSH struct { + ID string `json:"id"` + Optional bool `json:"optional,omitempty"` +} + +type Sources struct { + Images []ImageSource + Git []GitSource + HTTP []HTTPSource + Local []LocalSource +} + +const ( + ProvenanceSLSA1 = ProvenanceSLSA("v1") + ProvenanceSLSA02 = ProvenanceSLSA("v0.2") +) + +type ProvenanceSLSA string + +var provenanceSLSAs = []ProvenanceSLSA{ + ProvenanceSLSA1, + ProvenanceSLSA02, +} + +func (ps *ProvenanceSLSA) Validate() error { + if *ps == "" { + return errors.New("provenance SLSA version cannot be empty") + } + if slices.Contains(provenanceSLSAs, *ps) { + return nil + } + return errors.New("invalid provenance SLSA version: " + string(*ps)) +} + +type ProvenancePredicateSLSA02 struct { + slsa02.ProvenancePredicate + Invocation ProvenanceInvocationSLSA02 `json:"invocation,omitempty"` + BuildConfig *BuildConfig `json:"buildConfig,omitempty"` + Metadata *ProvenanceMetadataSLSA02 `json:"metadata,omitempty"` +} + +type ProvenanceInvocationSLSA02 struct { + ConfigSource slsa02.ConfigSource `json:"configSource,omitempty"` + Parameters Parameters `json:"parameters,omitempty"` + Environment Environment `json:"environment,omitempty"` +} + +type ProvenanceMetadataSLSA02 struct { + slsa02.ProvenanceMetadata + BuildKitMetadata BuildKitMetadata `json:"https://mobyproject.org/buildkit@v1#metadata,omitempty"` + Hermetic bool `json:"https://mobyproject.org/buildkit@v1#hermetic,omitempty"` +} + +type ProvenancePredicateSLSA1 struct { + slsa1.ProvenancePredicate + BuildDefinition ProvenanceBuildDefinitionSLSA1 `json:"buildDefinition,omitempty"` + RunDetails ProvenanceRunDetailsSLSA1 `json:"runDetails,omitempty"` +} + +type ProvenanceBuildDefinitionSLSA1 struct { + slsa1.ProvenanceBuildDefinition + ExternalParameters ProvenanceExternalParametersSLSA1 `json:"externalParameters,omitempty"` + InternalParameters ProvenanceInternalParametersSLSA1 `json:"internalParameters,omitempty"` +} + +type ProvenanceRunDetailsSLSA1 struct { + slsa1.ProvenanceRunDetails + Metadata *ProvenanceMetadataSLSA1 `json:"metadata,omitempty"` +} + +type ProvenanceExternalParametersSLSA1 struct { + ConfigSource ProvenanceConfigSourceSLSA1 `json:"configSource,omitempty"` + Request Parameters `json:"request,omitempty"` +} + +type ProvenanceConfigSourceSLSA1 struct { + URI string `json:"uri,omitempty"` + Digest slsa.DigestSet `json:"digest,omitempty"` + Path string `json:"path,omitempty"` +} + +type ProvenanceInternalParametersSLSA1 struct { + BuildConfig *BuildConfig `json:"buildConfig,omitempty"` + BuilderPlatform string `json:"builderPlatform"` +} + +type ProvenanceMetadataSLSA1 struct { + slsa1.BuildMetadata + BuildKitMetadata BuildKitMetadata `json:"buildkit_metadata,omitempty"` + Hermetic bool `json:"buildkit_hermetic,omitempty"` + Completeness BuildKitComplete `json:"buildkit_completeness,omitempty"` + Reproducible bool `json:"buildkit_reproducible,omitempty"` +} + +type Parameters struct { + Frontend string `json:"frontend,omitempty"` + Args map[string]string `json:"args,omitempty"` + Secrets 
[]*Secret `json:"secrets,omitempty"` + SSH []*SSH `json:"ssh,omitempty"` + Locals []*LocalSource `json:"locals,omitempty"` + // TODO: select export attributes + // TODO: frontend inputs +} + +type Environment struct { + Platform string `json:"platform"` +} + +type BuildKitMetadata struct { + VCS map[string]string `json:"vcs,omitempty"` + Source *Source `json:"source,omitempty"` + Layers map[string][][]ocispecs.Descriptor `json:"layers,omitempty"` + SysUsage []*resourcestypes.SysSample `json:"sysUsage,omitempty"` +} + +type BuildKitComplete struct { + Request bool `json:"request"` + ResolvedDependencies bool `json:"resolvedDependencies"` +} + +// ConvertSLSA02ToSLSA1 converts a SLSA 0.2 provenance predicate to a SLSA 1.0 +// provenance predicate. +// FIXME: It should be the other way around when v1 is the default. +func ConvertSLSA02ToSLSA1(p02 *ProvenancePredicateSLSA02) *ProvenancePredicateSLSA1 { + if p02 == nil { + return nil + } + + var resolvedDeps []slsa1.ResourceDescriptor + for _, m := range p02.Materials { + resolvedDeps = append(resolvedDeps, slsa1.ResourceDescriptor{ + URI: m.URI, + Digest: m.Digest, + }) + } + + buildDef := ProvenanceBuildDefinitionSLSA1{ + ProvenanceBuildDefinition: slsa1.ProvenanceBuildDefinition{ + BuildType: "https://github.com/moby/buildkit/blob/master/docs/attestations/slsa-definitions.md", + ResolvedDependencies: resolvedDeps, + }, + ExternalParameters: ProvenanceExternalParametersSLSA1{ + ConfigSource: ProvenanceConfigSourceSLSA1{ + URI: p02.Invocation.ConfigSource.URI, + Digest: p02.Invocation.ConfigSource.Digest, + Path: p02.Invocation.ConfigSource.EntryPoint, + }, + Request: p02.Invocation.Parameters, + }, + InternalParameters: ProvenanceInternalParametersSLSA1{ + BuildConfig: p02.BuildConfig, + BuilderPlatform: p02.Invocation.Environment.Platform, + }, + } + + var meta *ProvenanceMetadataSLSA1 + if p02.Metadata != nil { + meta = &ProvenanceMetadataSLSA1{ + BuildMetadata: slsa1.BuildMetadata{ + InvocationID: p02.Metadata.BuildInvocationID, + StartedOn: p02.Metadata.BuildStartedOn, + FinishedOn: p02.Metadata.BuildFinishedOn, + }, + BuildKitMetadata: p02.Metadata.BuildKitMetadata, + Hermetic: p02.Metadata.Hermetic, + Completeness: BuildKitComplete{ + Request: p02.Metadata.Completeness.Parameters, + ResolvedDependencies: p02.Metadata.Completeness.Materials, + }, + Reproducible: p02.Metadata.Reproducible, + } + } + + runDetails := ProvenanceRunDetailsSLSA1{ + ProvenanceRunDetails: slsa1.ProvenanceRunDetails{ + Builder: slsa1.Builder{ + ID: p02.Builder.ID, + // TODO: handle builder components versions + // Version: map[string]string{ + // "buildkit": version.Version, + // }, + }, + }, + Metadata: meta, + } + + return &ProvenancePredicateSLSA1{ + BuildDefinition: buildDef, + RunDetails: runDetails, + } +} diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/pb/attr.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/pb/attr.go index 85e7cce60..1157d9877 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/pb/attr.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/pb/attr.go @@ -6,12 +6,16 @@ const AttrAuthHeaderSecret = "git.authheadersecret" const AttrAuthTokenSecret = "git.authtokensecret" const AttrKnownSSHHosts = "git.knownsshhosts" const AttrMountSSHSock = "git.mountsshsock" +const AttrGitChecksum = "git.checksum" + const AttrLocalSessionID = "local.session" const AttrLocalUniqueID = "local.unique" const AttrIncludePatterns = "local.includepattern" const AttrFollowPaths = "local.followpaths" const 
AttrExcludePatterns = "local.excludepatterns" const AttrSharedKeyHint = "local.sharedkeyhint" +const AttrMetadataTransfer = "local.metadatatransfer" +const AttrMetadataTransferExclude = "local.metadatatransferexclude" const AttrLLBDefinitionFilename = "llbbuild.filename" @@ -20,6 +24,8 @@ const AttrHTTPFilename = "http.filename" const AttrHTTPPerm = "http.perm" const AttrHTTPUID = "http.uid" const AttrHTTPGID = "http.gid" +const AttrHTTPAuthHeaderSecret = "http.authheadersecret" +const AttrHTTPHeaderPrefix = "http.header." const AttrImageResolveMode = "image.resolvemode" const AttrImageResolveModeDefault = "default" diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/pb/caps.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/pb/caps.go index 173791e4d..75c298ae0 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/pb/caps.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/pb/caps.go @@ -21,6 +21,7 @@ const ( CapSourceLocalExcludePatterns apicaps.CapID = "source.local.excludepatterns" CapSourceLocalSharedKeyHint apicaps.CapID = "source.local.sharedkeyhint" CapSourceLocalDiffer apicaps.CapID = "source.local.differ" + CapSourceMetadataTransfer apicaps.CapID = "source.local.metadatatransfer" CapSourceGit apicaps.CapID = "source.git" CapSourceGitKeepDir apicaps.CapID = "source.git.keepgitdir" @@ -29,11 +30,15 @@ const ( CapSourceGitKnownSSHHosts apicaps.CapID = "source.git.knownsshhosts" CapSourceGitMountSSHSock apicaps.CapID = "source.git.mountsshsock" CapSourceGitSubdir apicaps.CapID = "source.git.subdir" + CapSourceGitChecksum apicaps.CapID = "source.git.checksum" CapSourceHTTP apicaps.CapID = "source.http" + CapSourceHTTPAuth apicaps.CapID = "source.http.auth" CapSourceHTTPChecksum apicaps.CapID = "source.http.checksum" CapSourceHTTPPerm apicaps.CapID = "source.http.perm" - CapSourceHTTPUIDGID apicaps.CapID = "soruce.http.uidgid" + // NOTE the historical typo + CapSourceHTTPUIDGID apicaps.CapID = "soruce.http.uidgid" + CapSourceHTTPHeader apicaps.CapID = "source.http.header" CapSourceOCILayout apicaps.CapID = "source.ocilayout" @@ -92,6 +97,7 @@ const ( CapSourceDateEpoch apicaps.CapID = "exporter.sourcedateepoch" CapMultipleExporters apicaps.CapID = "exporter.multiple" + CapSessionExporter apicaps.CapID = "exporter.session" CapSourcePolicy apicaps.CapID = "source.policy" @@ -169,6 +175,12 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapSourceMetadataTransfer, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapSourceGit, Enabled: true, @@ -211,6 +223,12 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapSourceGitChecksum, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapSourceHTTP, Enabled: true, @@ -230,7 +248,7 @@ func init() { }) Caps.Init(apicaps.Cap{ - ID: CapSourceOCILayout, + ID: CapSourceHTTPAuth, Enabled: true, Status: apicaps.CapStatusExperimental, }) @@ -241,6 +259,18 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapSourceHTTPHeader, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapSourceOCILayout, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapBuildOpLLBFileName, Enabled: true, @@ -504,6 +534,12 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapSessionExporter, + 
Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapSourcePolicy, Enabled: true, diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go index 4acac63c2..db85d91c2 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.11.4 // source: github.com/moby/buildkit/solver/pb/ops.proto @@ -14,6 +14,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -279,14 +280,11 @@ func (CacheSharingOpt) EnumDescriptor() ([]byte, []int) { // Op represents a vertex of the LLB DAG. type Op struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // changes to this structure must be represented in json.go. // inputs is a set of input edges. Inputs []*Input `protobuf:"bytes,1,rep,name=inputs,proto3" json:"inputs,omitempty"` - // Types that are assignable to Op: + // Types that are valid to be assigned to Op: // // *Op_Exec // *Op_Source @@ -294,9 +292,11 @@ type Op struct { // *Op_Build // *Op_Merge // *Op_Diff - Op isOp_Op `protobuf_oneof:"op"` - Platform *Platform `protobuf:"bytes,10,opt,name=platform,proto3" json:"platform,omitempty"` - Constraints *WorkerConstraints `protobuf:"bytes,11,opt,name=constraints,proto3" json:"constraints,omitempty"` + Op isOp_Op `protobuf_oneof:"op"` + Platform *Platform `protobuf:"bytes,10,opt,name=platform,proto3" json:"platform,omitempty"` + Constraints *WorkerConstraints `protobuf:"bytes,11,opt,name=constraints,proto3" json:"constraints,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Op) Reset() { @@ -336,51 +336,63 @@ func (x *Op) GetInputs() []*Input { return nil } -func (m *Op) GetOp() isOp_Op { - if m != nil { - return m.Op +func (x *Op) GetOp() isOp_Op { + if x != nil { + return x.Op } return nil } func (x *Op) GetExec() *ExecOp { - if x, ok := x.GetOp().(*Op_Exec); ok { - return x.Exec + if x != nil { + if x, ok := x.Op.(*Op_Exec); ok { + return x.Exec + } } return nil } func (x *Op) GetSource() *SourceOp { - if x, ok := x.GetOp().(*Op_Source); ok { - return x.Source + if x != nil { + if x, ok := x.Op.(*Op_Source); ok { + return x.Source + } } return nil } func (x *Op) GetFile() *FileOp { - if x, ok := x.GetOp().(*Op_File); ok { - return x.File + if x != nil { + if x, ok := x.Op.(*Op_File); ok { + return x.File + } } return nil } func (x *Op) GetBuild() *BuildOp { - if x, ok := x.GetOp().(*Op_Build); ok { - return x.Build + if x != nil { + if x, ok := x.Op.(*Op_Build); ok { + return x.Build + } } return nil } func (x *Op) GetMerge() *MergeOp { - if x, ok := x.GetOp().(*Op_Merge); ok { - return x.Merge + if x != nil { + if x, ok := x.Op.(*Op_Merge); ok { + return x.Merge + } } return nil } func (x *Op) GetDiff() *DiffOp { - if x, ok := x.GetOp().(*Op_Diff); ok { - return x.Diff + if x != nil { + if x, ok := x.Op.(*Op_Diff); ok { + return x.Diff + } } return nil } @@ -441,15 +453,14 @@ func (*Op_Diff) isOp_Op() {} // Platform is github.com/opencontainers/image-spec/specs-go/v1.Platform type Platform struct { - state protoimpl.MessageState - sizeCache 
protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Architecture string `protobuf:"bytes,1,opt,name=Architecture,proto3" json:"Architecture,omitempty"` + OS string `protobuf:"bytes,2,opt,name=OS,proto3" json:"OS,omitempty"` + Variant string `protobuf:"bytes,3,opt,name=Variant,proto3" json:"Variant,omitempty"` + OSVersion string `protobuf:"bytes,4,opt,name=OSVersion,proto3" json:"OSVersion,omitempty"` + OSFeatures []string `protobuf:"bytes,5,rep,name=OSFeatures,proto3" json:"OSFeatures,omitempty"` // unused unknownFields protoimpl.UnknownFields - - Architecture string `protobuf:"bytes,1,opt,name=Architecture,proto3" json:"Architecture,omitempty"` - OS string `protobuf:"bytes,2,opt,name=OS,proto3" json:"OS,omitempty"` - Variant string `protobuf:"bytes,3,opt,name=Variant,proto3" json:"Variant,omitempty"` - OSVersion string `protobuf:"bytes,4,opt,name=OSVersion,proto3" json:"OSVersion,omitempty"` - OSFeatures []string `protobuf:"bytes,5,rep,name=OSFeatures,proto3" json:"OSFeatures,omitempty"` // unused + sizeCache protoimpl.SizeCache } func (x *Platform) Reset() { @@ -519,14 +530,13 @@ func (x *Platform) GetOSFeatures() []string { // Input represents an input edge for an Op. type Input struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // digest of the marshaled input Op Digest string `protobuf:"bytes,1,opt,name=digest,proto3" json:"digest,omitempty"` // output index of the input Op - Index int64 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` + Index int64 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Input) Reset() { @@ -575,16 +585,15 @@ func (x *Input) GetIndex() int64 { // ExecOp executes a command in a container. type ExecOp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Meta *Meta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` + Mounts []*Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` + Network NetMode `protobuf:"varint,3,opt,name=network,proto3,enum=pb.NetMode" json:"network,omitempty"` + Security SecurityMode `protobuf:"varint,4,opt,name=security,proto3,enum=pb.SecurityMode" json:"security,omitempty"` + Secretenv []*SecretEnv `protobuf:"bytes,5,rep,name=secretenv,proto3" json:"secretenv,omitempty"` + CdiDevices []*CDIDevice `protobuf:"bytes,6,rep,name=cdiDevices,proto3" json:"cdiDevices,omitempty"` unknownFields protoimpl.UnknownFields - - Meta *Meta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` - Mounts []*Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` - Network NetMode `protobuf:"varint,3,opt,name=network,proto3,enum=pb.NetMode" json:"network,omitempty"` - Security SecurityMode `protobuf:"varint,4,opt,name=security,proto3,enum=pb.SecurityMode" json:"security,omitempty"` - Secretenv []*SecretEnv `protobuf:"bytes,5,rep,name=secretenv,proto3" json:"secretenv,omitempty"` - CdiDevices []*CDIDevice `protobuf:"bytes,6,rep,name=cdiDevices,proto3" json:"cdiDevices,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ExecOp) Reset() { @@ -663,21 +672,20 @@ func (x *ExecOp) GetCdiDevices() []*CDIDevice { // Meta is unrelated to LLB metadata. // FIXME: rename (ExecContext? ExecArgs?) 
type Meta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Args []string `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty"` - Env []string `protobuf:"bytes,2,rep,name=env,proto3" json:"env,omitempty"` - Cwd string `protobuf:"bytes,3,opt,name=cwd,proto3" json:"cwd,omitempty"` - User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"` - ProxyEnv *ProxyEnv `protobuf:"bytes,5,opt,name=proxy_env,json=proxyEnv,proto3" json:"proxy_env,omitempty"` - ExtraHosts []*HostIP `protobuf:"bytes,6,rep,name=extraHosts,proto3" json:"extraHosts,omitempty"` - Hostname string `protobuf:"bytes,7,opt,name=hostname,proto3" json:"hostname,omitempty"` - Ulimit []*Ulimit `protobuf:"bytes,9,rep,name=ulimit,proto3" json:"ulimit,omitempty"` - CgroupParent string `protobuf:"bytes,10,opt,name=cgroupParent,proto3" json:"cgroupParent,omitempty"` - RemoveMountStubsRecursive bool `protobuf:"varint,11,opt,name=removeMountStubsRecursive,proto3" json:"removeMountStubsRecursive,omitempty"` - ValidExitCodes []int32 `protobuf:"varint,12,rep,packed,name=validExitCodes,proto3" json:"validExitCodes,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Args []string `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty"` + Env []string `protobuf:"bytes,2,rep,name=env,proto3" json:"env,omitempty"` + Cwd string `protobuf:"bytes,3,opt,name=cwd,proto3" json:"cwd,omitempty"` + User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"` + ProxyEnv *ProxyEnv `protobuf:"bytes,5,opt,name=proxy_env,json=proxyEnv,proto3" json:"proxy_env,omitempty"` + ExtraHosts []*HostIP `protobuf:"bytes,6,rep,name=extraHosts,proto3" json:"extraHosts,omitempty"` + Hostname string `protobuf:"bytes,7,opt,name=hostname,proto3" json:"hostname,omitempty"` + Ulimit []*Ulimit `protobuf:"bytes,9,rep,name=ulimit,proto3" json:"ulimit,omitempty"` + CgroupParent string `protobuf:"bytes,10,opt,name=cgroupParent,proto3" json:"cgroupParent,omitempty"` + RemoveMountStubsRecursive bool `protobuf:"varint,11,opt,name=removeMountStubsRecursive,proto3" json:"removeMountStubsRecursive,omitempty"` + ValidExitCodes []int32 `protobuf:"varint,12,rep,packed,name=validExitCodes,proto3" json:"validExitCodes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Meta) Reset() { @@ -788,12 +796,11 @@ func (x *Meta) GetValidExitCodes() []int32 { } type HostIP struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` + IP string `protobuf:"bytes,2,opt,name=IP,proto3" json:"IP,omitempty"` unknownFields protoimpl.UnknownFields - - Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` - IP string `protobuf:"bytes,2,opt,name=IP,proto3" json:"IP,omitempty"` + sizeCache protoimpl.SizeCache } func (x *HostIP) Reset() { @@ -841,13 +848,12 @@ func (x *HostIP) GetIP() string { } type Ulimit struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Soft int64 `protobuf:"varint,2,opt,name=Soft,proto3" json:"Soft,omitempty"` + Hard int64 `protobuf:"varint,3,opt,name=Hard,proto3" json:"Hard,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - Soft int64 
`protobuf:"varint,2,opt,name=Soft,proto3" json:"Soft,omitempty"` - Hard int64 `protobuf:"varint,3,opt,name=Hard,proto3" json:"Hard,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Ulimit) Reset() { @@ -903,13 +909,12 @@ func (x *Ulimit) GetHard() int64 { // SecretEnv is an environment variable that is backed by a secret. type SecretEnv struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Optional bool `protobuf:"varint,3,opt,name=optional,proto3" json:"optional,omitempty"` unknownFields protoimpl.UnknownFields - - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Optional bool `protobuf:"varint,3,opt,name=optional,proto3" json:"optional,omitempty"` + sizeCache protoimpl.SizeCache } func (x *SecretEnv) Reset() { @@ -965,15 +970,14 @@ func (x *SecretEnv) GetOptional() bool { // CDIDevice specifies a CDI device information. type CDIDevice struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Fully qualified CDI device name (e.g., vendor.com/gpu=gpudevice1) // https://github.com/cncf-tags/container-device-interface/blob/main/SPEC.md Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Optional defines if CDI device is required. - Optional bool `protobuf:"varint,2,opt,name=optional,proto3" json:"optional,omitempty"` + Optional bool `protobuf:"varint,2,opt,name=optional,proto3" json:"optional,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CDIDevice) Reset() { @@ -1022,22 +1026,21 @@ func (x *CDIDevice) GetOptional() bool { // Mount specifies how to mount an input Op as a filesystem. 
type Mount struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Input int64 `protobuf:"varint,1,opt,name=input,proto3" json:"input,omitempty"` + Selector string `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` + Dest string `protobuf:"bytes,3,opt,name=dest,proto3" json:"dest,omitempty"` + Output int64 `protobuf:"varint,4,opt,name=output,proto3" json:"output,omitempty"` + Readonly bool `protobuf:"varint,5,opt,name=readonly,proto3" json:"readonly,omitempty"` + MountType MountType `protobuf:"varint,6,opt,name=mountType,proto3,enum=pb.MountType" json:"mountType,omitempty"` + TmpfsOpt *TmpfsOpt `protobuf:"bytes,19,opt,name=TmpfsOpt,proto3" json:"TmpfsOpt,omitempty"` + CacheOpt *CacheOpt `protobuf:"bytes,20,opt,name=cacheOpt,proto3" json:"cacheOpt,omitempty"` + SecretOpt *SecretOpt `protobuf:"bytes,21,opt,name=secretOpt,proto3" json:"secretOpt,omitempty"` + SSHOpt *SSHOpt `protobuf:"bytes,22,opt,name=SSHOpt,proto3" json:"SSHOpt,omitempty"` + ResultID string `protobuf:"bytes,23,opt,name=resultID,proto3" json:"resultID,omitempty"` + ContentCache MountContentCache `protobuf:"varint,24,opt,name=contentCache,proto3,enum=pb.MountContentCache" json:"contentCache,omitempty"` unknownFields protoimpl.UnknownFields - - Input int64 `protobuf:"varint,1,opt,name=input,proto3" json:"input,omitempty"` - Selector string `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` - Dest string `protobuf:"bytes,3,opt,name=dest,proto3" json:"dest,omitempty"` - Output int64 `protobuf:"varint,4,opt,name=output,proto3" json:"output,omitempty"` - Readonly bool `protobuf:"varint,5,opt,name=readonly,proto3" json:"readonly,omitempty"` - MountType MountType `protobuf:"varint,6,opt,name=mountType,proto3,enum=pb.MountType" json:"mountType,omitempty"` - TmpfsOpt *TmpfsOpt `protobuf:"bytes,19,opt,name=TmpfsOpt,proto3" json:"TmpfsOpt,omitempty"` - CacheOpt *CacheOpt `protobuf:"bytes,20,opt,name=cacheOpt,proto3" json:"cacheOpt,omitempty"` - SecretOpt *SecretOpt `protobuf:"bytes,21,opt,name=secretOpt,proto3" json:"secretOpt,omitempty"` - SSHOpt *SSHOpt `protobuf:"bytes,22,opt,name=SSHOpt,proto3" json:"SSHOpt,omitempty"` - ResultID string `protobuf:"bytes,23,opt,name=resultID,proto3" json:"resultID,omitempty"` - ContentCache MountContentCache `protobuf:"varint,24,opt,name=contentCache,proto3,enum=pb.MountContentCache" json:"contentCache,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Mount) Reset() { @@ -1156,12 +1159,11 @@ func (x *Mount) GetContentCache() MountContentCache { // TmpfsOpt defines options describing tpmfs mounts type TmpfsOpt struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Specify an upper limit on the size of the filesystem. 
- Size int64 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` + Size int64 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *TmpfsOpt) Reset() { @@ -1203,14 +1205,13 @@ func (x *TmpfsOpt) GetSize() int64 { // CacheOpt defines options specific to cache mounts type CacheOpt struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // ID is an optional namespace for the mount ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` // Sharing is the sharing mode for the mount - Sharing CacheSharingOpt `protobuf:"varint,2,opt,name=sharing,proto3,enum=pb.CacheSharingOpt" json:"sharing,omitempty"` + Sharing CacheSharingOpt `protobuf:"varint,2,opt,name=sharing,proto3,enum=pb.CacheSharingOpt" json:"sharing,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CacheOpt) Reset() { @@ -1259,10 +1260,7 @@ func (x *CacheOpt) GetSharing() CacheSharingOpt { // SecretOpt defines options describing secret mounts type SecretOpt struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // ID of secret. Used for quering the value. ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` // UID of secret file @@ -1273,7 +1271,9 @@ type SecretOpt struct { Mode uint32 `protobuf:"varint,4,opt,name=mode,proto3" json:"mode,omitempty"` // Optional defines if secret value is required. Error is produced // if value is not found and optional is false. - Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` + Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SecretOpt) Reset() { @@ -1343,10 +1343,7 @@ func (x *SecretOpt) GetOptional() bool { // SSHOpt defines options describing ssh mounts type SSHOpt struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // ID of exposed ssh rule. Used for quering the value. ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` // UID of agent socket @@ -1357,7 +1354,9 @@ type SSHOpt struct { Mode uint32 `protobuf:"varint,4,opt,name=mode,proto3" json:"mode,omitempty"` // Optional defines if ssh socket is required. Error is produced // if client does not expose ssh. - Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` + Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SSHOpt) Reset() { @@ -1427,15 +1426,14 @@ func (x *SSHOpt) GetOptional() bool { // SourceOp specifies a source such as build contexts and images. type SourceOp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // TODO: use source type or any type instead of URL protocol. // identifier e.g. local://, docker-image://, git://, https://... 
Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` // attrs are defined in attr.go - Attrs map[string]string `protobuf:"bytes,2,rep,name=attrs,proto3" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Attrs map[string]string `protobuf:"bytes,2,rep,name=attrs,proto3" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SourceOp) Reset() { @@ -1485,14 +1483,13 @@ func (x *SourceOp) GetAttrs() map[string]string { // BuildOp is used for nested build invocation. // BuildOp is experimental and can break without backwards compatibility type BuildOp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Builder int64 `protobuf:"varint,1,opt,name=builder,proto3" json:"builder,omitempty"` + Inputs map[string]*BuildInput `protobuf:"bytes,2,rep,name=inputs,proto3" json:"inputs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Def *Definition `protobuf:"bytes,3,opt,name=def,proto3" json:"def,omitempty"` + Attrs map[string]string `protobuf:"bytes,4,rep,name=attrs,proto3" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // outputs unknownFields protoimpl.UnknownFields - - Builder int64 `protobuf:"varint,1,opt,name=builder,proto3" json:"builder,omitempty"` - Inputs map[string]*BuildInput `protobuf:"bytes,2,rep,name=inputs,proto3" json:"inputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Def *Definition `protobuf:"bytes,3,opt,name=def,proto3" json:"def,omitempty"` - Attrs map[string]string `protobuf:"bytes,4,rep,name=attrs,proto3" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // outputs + sizeCache protoimpl.SizeCache } func (x *BuildOp) Reset() { @@ -1555,11 +1552,10 @@ func (x *BuildOp) GetAttrs() map[string]string { // BuildInput is used for BuildOp. type BuildInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Input int64 `protobuf:"varint,1,opt,name=input,proto3" json:"input,omitempty"` unknownFields protoimpl.UnknownFields - - Input int64 `protobuf:"varint,1,opt,name=input,proto3" json:"input,omitempty"` + sizeCache protoimpl.SizeCache } func (x *BuildInput) Reset() { @@ -1601,19 +1597,18 @@ func (x *BuildInput) GetInput() int64 { // OpMetadata is a per-vertex metadata entry, which can be defined for arbitrary Op vertex and overridable on the run time. type OpMetadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // ignore_cache specifies to ignore the cache for this Op. 
IgnoreCache bool `protobuf:"varint,1,opt,name=ignore_cache,json=ignoreCache,proto3" json:"ignore_cache,omitempty"` // Description can be used for keeping any text fields that builder doesn't parse - Description map[string]string `protobuf:"bytes,2,rep,name=description,proto3" json:"description,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Description map[string]string `protobuf:"bytes,2,rep,name=description,proto3" json:"description,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // index 3 reserved for WorkerConstraint in previous versions // WorkerConstraint worker_constraint = 3; ExportCache *ExportCache `protobuf:"bytes,4,opt,name=export_cache,json=exportCache,proto3" json:"export_cache,omitempty"` - Caps map[string]bool `protobuf:"bytes,5,rep,name=caps,proto3" json:"caps,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + Caps map[string]bool `protobuf:"bytes,5,rep,name=caps,proto3" json:"caps,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` ProgressGroup *ProgressGroup `protobuf:"bytes,6,opt,name=progress_group,json=progressGroup,proto3" json:"progress_group,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *OpMetadata) Reset() { @@ -1683,12 +1678,11 @@ func (x *OpMetadata) GetProgressGroup() *ProgressGroup { // Source is a source mapping description for a file type Source struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Locations map[string]*Locations `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Infos []*SourceInfo `protobuf:"bytes,2,rep,name=infos,proto3" json:"infos,omitempty"` unknownFields protoimpl.UnknownFields - - Locations map[string]*Locations `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Infos []*SourceInfo `protobuf:"bytes,2,rep,name=infos,proto3" json:"infos,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Source) Reset() { @@ -1737,11 +1731,10 @@ func (x *Source) GetInfos() []*SourceInfo { // Locations is a list of ranges with a index to its source map. 
type Locations struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Locations []*Location `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"` unknownFields protoimpl.UnknownFields - - Locations []*Location `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Locations) Reset() { @@ -1783,14 +1776,13 @@ func (x *Locations) GetLocations() []*Location { // Source info contains the shared metadata of a source mapping type SourceInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Filename string `protobuf:"bytes,1,opt,name=filename,proto3" json:"filename,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Definition *Definition `protobuf:"bytes,3,opt,name=definition,proto3" json:"definition,omitempty"` + Language string `protobuf:"bytes,4,opt,name=language,proto3" json:"language,omitempty"` unknownFields protoimpl.UnknownFields - - Filename string `protobuf:"bytes,1,opt,name=filename,proto3" json:"filename,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Definition *Definition `protobuf:"bytes,3,opt,name=definition,proto3" json:"definition,omitempty"` - Language string `protobuf:"bytes,4,opt,name=language,proto3" json:"language,omitempty"` + sizeCache protoimpl.SizeCache } func (x *SourceInfo) Reset() { @@ -1853,12 +1845,11 @@ func (x *SourceInfo) GetLanguage() string { // Location defines list of areas in to source file type Location struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + SourceIndex int32 `protobuf:"varint,1,opt,name=sourceIndex,proto3" json:"sourceIndex,omitempty"` + Ranges []*Range `protobuf:"bytes,2,rep,name=ranges,proto3" json:"ranges,omitempty"` unknownFields protoimpl.UnknownFields - - SourceIndex int32 `protobuf:"varint,1,opt,name=sourceIndex,proto3" json:"sourceIndex,omitempty"` - Ranges []*Range `protobuf:"bytes,2,rep,name=ranges,proto3" json:"ranges,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Location) Reset() { @@ -1907,12 +1898,11 @@ func (x *Location) GetRanges() []*Range { // Range is an area in the source file type Range struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Start *Position `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` + End *Position `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"` unknownFields protoimpl.UnknownFields - - Start *Position `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` - End *Position `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Range) Reset() { @@ -1961,12 +1951,11 @@ func (x *Range) GetEnd() *Position { // Position is single location in a source file type Position struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Line int32 `protobuf:"varint,1,opt,name=line,proto3" json:"line,omitempty"` + Character int32 `protobuf:"varint,2,opt,name=character,proto3" json:"character,omitempty"` unknownFields protoimpl.UnknownFields - - Line int32 `protobuf:"varint,1,opt,name=line,proto3" json:"line,omitempty"` - Character int32 `protobuf:"varint,2,opt,name=character,proto3" 
json:"character,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Position) Reset() { @@ -2014,11 +2003,10 @@ func (x *Position) GetCharacter() int32 { } type ExportCache struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Value bool `protobuf:"varint,1,opt,name=Value,proto3" json:"Value,omitempty"` unknownFields protoimpl.UnknownFields - - Value bool `protobuf:"varint,1,opt,name=Value,proto3" json:"Value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ExportCache) Reset() { @@ -2059,13 +2047,12 @@ func (x *ExportCache) GetValue() bool { } type ProgressGroup struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Weak bool `protobuf:"varint,3,opt,name=weak,proto3" json:"weak,omitempty"` unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Weak bool `protobuf:"varint,3,opt,name=weak,proto3" json:"weak,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ProgressGroup) Reset() { @@ -2120,15 +2107,14 @@ func (x *ProgressGroup) GetWeak() bool { } type ProxyEnv struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + HttpProxy string `protobuf:"bytes,1,opt,name=http_proxy,json=httpProxy,proto3" json:"http_proxy,omitempty"` + HttpsProxy string `protobuf:"bytes,2,opt,name=https_proxy,json=httpsProxy,proto3" json:"https_proxy,omitempty"` + FtpProxy string `protobuf:"bytes,3,opt,name=ftp_proxy,json=ftpProxy,proto3" json:"ftp_proxy,omitempty"` + NoProxy string `protobuf:"bytes,4,opt,name=no_proxy,json=noProxy,proto3" json:"no_proxy,omitempty"` + AllProxy string `protobuf:"bytes,5,opt,name=all_proxy,json=allProxy,proto3" json:"all_proxy,omitempty"` unknownFields protoimpl.UnknownFields - - HttpProxy string `protobuf:"bytes,1,opt,name=http_proxy,json=httpProxy,proto3" json:"http_proxy,omitempty"` - HttpsProxy string `protobuf:"bytes,2,opt,name=https_proxy,json=httpsProxy,proto3" json:"https_proxy,omitempty"` - FtpProxy string `protobuf:"bytes,3,opt,name=ftp_proxy,json=ftpProxy,proto3" json:"ftp_proxy,omitempty"` - NoProxy string `protobuf:"bytes,4,opt,name=no_proxy,json=noProxy,proto3" json:"no_proxy,omitempty"` - AllProxy string `protobuf:"bytes,5,opt,name=all_proxy,json=allProxy,proto3" json:"all_proxy,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ProxyEnv) Reset() { @@ -2198,11 +2184,10 @@ func (x *ProxyEnv) GetAllProxy() string { // WorkerConstraints defines conditions for the worker type WorkerConstraints struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Filter []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"` // containerd-style filter unknownFields protoimpl.UnknownFields - - Filter []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"` // containerd-style filter + sizeCache protoimpl.SizeCache } func (x *WorkerConstraints) Reset() { @@ -2244,17 +2229,16 @@ func (x *WorkerConstraints) GetFilter() []string { // Definition is the LLB definition structure with per-vertex metadata entries type Definition struct { - state protoimpl.MessageState - sizeCache 
protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // def is a list of marshaled Op messages Def [][]byte `protobuf:"bytes,1,rep,name=def,proto3" json:"def,omitempty"` // metadata contains metadata for the each of the Op messages. // A key must be an LLB op digest string. Currently, empty string is not expected as a key, but it may change in the future. - Metadata map[string]*OpMetadata `protobuf:"bytes,2,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Metadata map[string]*OpMetadata `protobuf:"bytes,2,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Source contains the source mapping information for the vertexes in the definition - Source *Source `protobuf:"bytes,3,opt,name=Source,proto3" json:"Source,omitempty"` + Source *Source `protobuf:"bytes,3,opt,name=Source,proto3" json:"Source,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Definition) Reset() { @@ -2309,11 +2293,10 @@ func (x *Definition) GetSource() *Source { } type FileOp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Actions []*FileAction `protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty"` unknownFields protoimpl.UnknownFields - - Actions []*FileAction `protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty"` + sizeCache protoimpl.SizeCache } func (x *FileOp) Reset() { @@ -2354,22 +2337,21 @@ func (x *FileOp) GetActions() []*FileAction { } type FileAction struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // changes to this structure must be represented in json.go. 
Input int64 `protobuf:"varint,1,opt,name=input,proto3" json:"input,omitempty"` // could be real input or target (target index + max input index) SecondaryInput int64 `protobuf:"varint,2,opt,name=secondaryInput,proto3" json:"secondaryInput,omitempty"` // --//-- Output int64 `protobuf:"varint,3,opt,name=output,proto3" json:"output,omitempty"` - // Types that are assignable to Action: + // Types that are valid to be assigned to Action: // // *FileAction_Copy // *FileAction_Mkfile // *FileAction_Mkdir // *FileAction_Rm // *FileAction_Symlink - Action isFileAction_Action `protobuf_oneof:"action"` + Action isFileAction_Action `protobuf_oneof:"action"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileAction) Reset() { @@ -2423,44 +2405,54 @@ func (x *FileAction) GetOutput() int64 { return 0 } -func (m *FileAction) GetAction() isFileAction_Action { - if m != nil { - return m.Action +func (x *FileAction) GetAction() isFileAction_Action { + if x != nil { + return x.Action } return nil } func (x *FileAction) GetCopy() *FileActionCopy { - if x, ok := x.GetAction().(*FileAction_Copy); ok { - return x.Copy + if x != nil { + if x, ok := x.Action.(*FileAction_Copy); ok { + return x.Copy + } } return nil } func (x *FileAction) GetMkfile() *FileActionMkFile { - if x, ok := x.GetAction().(*FileAction_Mkfile); ok { - return x.Mkfile + if x != nil { + if x, ok := x.Action.(*FileAction_Mkfile); ok { + return x.Mkfile + } } return nil } func (x *FileAction) GetMkdir() *FileActionMkDir { - if x, ok := x.GetAction().(*FileAction_Mkdir); ok { - return x.Mkdir + if x != nil { + if x, ok := x.Action.(*FileAction_Mkdir); ok { + return x.Mkdir + } } return nil } func (x *FileAction) GetRm() *FileActionRm { - if x, ok := x.GetAction().(*FileAction_Rm); ok { - return x.Rm + if x != nil { + if x, ok := x.Action.(*FileAction_Rm); ok { + return x.Rm + } } return nil } func (x *FileAction) GetSymlink() *FileActionSymlink { - if x, ok := x.GetAction().(*FileAction_Symlink); ok { - return x.Symlink + if x != nil { + if x, ok := x.Action.(*FileAction_Symlink); ok { + return x.Symlink + } } return nil } @@ -2505,10 +2497,7 @@ func (*FileAction_Rm) isFileAction_Action() {} func (*FileAction_Symlink) isFileAction_Action() {} type FileActionCopy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // src is the source path Src string `protobuf:"bytes,1,opt,name=src,proto3" json:"src,omitempty"` // dest path @@ -2538,7 +2527,9 @@ type FileActionCopy struct { // alwaysReplaceExistingDestPaths results in an existing dest path that differs in type from the src path being replaced rather than the default of returning an error AlwaysReplaceExistingDestPaths bool `protobuf:"varint,14,opt,name=alwaysReplaceExistingDestPaths,proto3" json:"alwaysReplaceExistingDestPaths,omitempty"` // mode in non-octal format - ModeStr string `protobuf:"bytes,15,opt,name=modeStr,proto3" json:"modeStr,omitempty"` + ModeStr string `protobuf:"bytes,15,opt,name=modeStr,proto3" json:"modeStr,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileActionCopy) Reset() { @@ -2677,10 +2668,7 @@ func (x *FileActionCopy) GetModeStr() string { } type FileActionMkFile struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // path for the new file Path string 
`protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // permission bits @@ -2690,7 +2678,9 @@ type FileActionMkFile struct { // optional owner for the new file Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` // optional created time override - Timestamp int64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp int64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileActionMkFile) Reset() { @@ -2759,10 +2749,7 @@ func (x *FileActionMkFile) GetTimestamp() int64 { } type FileActionSymlink struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // destination path for the new file representing the link Oldpath string `protobuf:"bytes,1,opt,name=oldpath,proto3" json:"oldpath,omitempty"` // source path for the link @@ -2770,7 +2757,9 @@ type FileActionSymlink struct { // optional owner for the new file Owner *ChownOpt `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` // optional created time override - Timestamp int64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp int64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileActionSymlink) Reset() { @@ -2832,10 +2821,7 @@ func (x *FileActionSymlink) GetTimestamp() int64 { } type FileActionMkDir struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // path for the new directory Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // permission bits @@ -2845,7 +2831,9 @@ type FileActionMkDir struct { // optional owner for the new directory Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` // optional created time override - Timestamp int64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp int64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileActionMkDir) Reset() { @@ -2914,16 +2902,15 @@ func (x *FileActionMkDir) GetTimestamp() int64 { } type FileActionRm struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // path to remove Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` // allowNotFound doesn't fail the rm if file is not found AllowNotFound bool `protobuf:"varint,2,opt,name=allowNotFound,proto3" json:"allowNotFound,omitempty"` // allowWildcard allows filepath.Match wildcards in path AllowWildcard bool `protobuf:"varint,3,opt,name=allowWildcard,proto3" json:"allowWildcard,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *FileActionRm) Reset() { @@ -2978,12 +2965,11 @@ func (x *FileActionRm) GetAllowWildcard() bool { } type ChownOpt struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + User *UserOpt `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` + Group *UserOpt `protobuf:"bytes,2,opt,name=group,proto3" 
json:"group,omitempty"` unknownFields protoimpl.UnknownFields - - User *UserOpt `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` - Group *UserOpt `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ChownOpt) Reset() { @@ -3031,17 +3017,16 @@ func (x *ChownOpt) GetGroup() *UserOpt { } type UserOpt struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // changes to this structure must be represented in json.go. // - // Types that are assignable to User: + // Types that are valid to be assigned to User: // // *UserOpt_ByName // *UserOpt_ByID - User isUserOpt_User `protobuf_oneof:"user"` + User isUserOpt_User `protobuf_oneof:"user"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *UserOpt) Reset() { @@ -3074,23 +3059,27 @@ func (*UserOpt) Descriptor() ([]byte, []int) { return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP(), []int{37} } -func (m *UserOpt) GetUser() isUserOpt_User { - if m != nil { - return m.User +func (x *UserOpt) GetUser() isUserOpt_User { + if x != nil { + return x.User } return nil } func (x *UserOpt) GetByName() *NamedUserOpt { - if x, ok := x.GetUser().(*UserOpt_ByName); ok { - return x.ByName + if x != nil { + if x, ok := x.User.(*UserOpt_ByName); ok { + return x.ByName + } } return nil } func (x *UserOpt) GetByID() uint32 { - if x, ok := x.GetUser().(*UserOpt_ByID); ok { - return x.ByID + if x != nil { + if x, ok := x.User.(*UserOpt_ByID); ok { + return x.ByID + } } return 0 } @@ -3112,12 +3101,11 @@ func (*UserOpt_ByName) isUserOpt_User() {} func (*UserOpt_ByID) isUserOpt_User() {} type NamedUserOpt struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Input int64 `protobuf:"varint,2,opt,name=input,proto3" json:"input,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Input int64 `protobuf:"varint,2,opt,name=input,proto3" json:"input,omitempty"` + sizeCache protoimpl.SizeCache } func (x *NamedUserOpt) Reset() { @@ -3165,11 +3153,10 @@ func (x *NamedUserOpt) GetInput() int64 { } type MergeInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Input int64 `protobuf:"varint,1,opt,name=input,proto3" json:"input,omitempty"` unknownFields protoimpl.UnknownFields - - Input int64 `protobuf:"varint,1,opt,name=input,proto3" json:"input,omitempty"` + sizeCache protoimpl.SizeCache } func (x *MergeInput) Reset() { @@ -3210,11 +3197,10 @@ func (x *MergeInput) GetInput() int64 { } type MergeOp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Inputs []*MergeInput `protobuf:"bytes,1,rep,name=inputs,proto3" json:"inputs,omitempty"` unknownFields protoimpl.UnknownFields - - Inputs []*MergeInput `protobuf:"bytes,1,rep,name=inputs,proto3" json:"inputs,omitempty"` + sizeCache protoimpl.SizeCache } func (x *MergeOp) Reset() { @@ -3255,11 +3241,10 @@ func (x *MergeOp) GetInputs() []*MergeInput { } type LowerDiffInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Input int64 
`protobuf:"varint,1,opt,name=input,proto3" json:"input,omitempty"` unknownFields protoimpl.UnknownFields - - Input int64 `protobuf:"varint,1,opt,name=input,proto3" json:"input,omitempty"` + sizeCache protoimpl.SizeCache } func (x *LowerDiffInput) Reset() { @@ -3300,11 +3285,10 @@ func (x *LowerDiffInput) GetInput() int64 { } type UpperDiffInput struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Input int64 `protobuf:"varint,1,opt,name=input,proto3" json:"input,omitempty"` unknownFields protoimpl.UnknownFields - - Input int64 `protobuf:"varint,1,opt,name=input,proto3" json:"input,omitempty"` + sizeCache protoimpl.SizeCache } func (x *UpperDiffInput) Reset() { @@ -3345,12 +3329,11 @@ func (x *UpperDiffInput) GetInput() int64 { } type DiffOp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Lower *LowerDiffInput `protobuf:"bytes,1,opt,name=lower,proto3" json:"lower,omitempty"` + Upper *UpperDiffInput `protobuf:"bytes,2,opt,name=upper,proto3" json:"upper,omitempty"` unknownFields protoimpl.UnknownFields - - Lower *LowerDiffInput `protobuf:"bytes,1,opt,name=lower,proto3" json:"lower,omitempty"` - Upper *UpperDiffInput `protobuf:"bytes,2,opt,name=upper,proto3" json:"upper,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DiffOp) Reset() { @@ -3399,430 +3382,299 @@ func (x *DiffOp) GetUpper() *UpperDiffInput { var File_github_com_moby_buildkit_solver_pb_ops_proto protoreflect.FileDescriptor -var file_github_com_moby_buildkit_solver_pb_ops_proto_rawDesc = []byte{ - 0x0a, 0x2c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, - 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x6f, 0x6c, 0x76, 0x65, - 0x72, 0x2f, 0x70, 0x62, 0x2f, 0x6f, 0x70, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, - 0x70, 0x62, 0x22, 0xe8, 0x02, 0x0a, 0x02, 0x4f, 0x70, 0x12, 0x21, 0x0a, 0x06, 0x69, 0x6e, 0x70, - 0x75, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x49, - 0x6e, 0x70, 0x75, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x12, 0x20, 0x0a, 0x04, - 0x65, 0x78, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, - 0x45, 0x78, 0x65, 0x63, 0x4f, 0x70, 0x48, 0x00, 0x52, 0x04, 0x65, 0x78, 0x65, 0x63, 0x12, 0x26, - 0x0a, 0x06, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, - 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x70, 0x48, 0x00, 0x52, 0x06, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, - 0x48, 0x00, 0x52, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x05, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x75, 0x69, - 0x6c, 0x64, 0x4f, 0x70, 0x48, 0x00, 0x52, 0x05, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x12, 0x23, 0x0a, - 0x05, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, - 0x62, 0x2e, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x4f, 0x70, 0x48, 0x00, 0x52, 0x05, 0x6d, 0x65, 0x72, - 0x67, 0x65, 0x12, 0x20, 0x0a, 0x04, 0x64, 0x69, 0x66, 0x66, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x69, 0x66, 0x66, 0x4f, 0x70, 0x48, 0x00, 0x52, 0x04, - 0x64, 0x69, 0x66, 0x66, 0x12, 0x28, 0x0a, 0x08, 0x70, 
0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x6c, 0x61, 0x74, - 0x66, 0x6f, 0x72, 0x6d, 0x52, 0x08, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x37, - 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x43, - 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x73, - 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x42, 0x04, 0x0a, 0x02, 0x6f, 0x70, 0x22, 0x96, 0x01, - 0x0a, 0x08, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x12, 0x22, 0x0a, 0x0c, 0x41, 0x72, - 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0c, 0x41, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x12, 0x0e, - 0x0a, 0x02, 0x4f, 0x53, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x4f, 0x53, 0x12, 0x18, - 0x0a, 0x07, 0x56, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x56, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x4f, 0x53, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x4f, 0x53, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x0a, 0x0a, 0x4f, 0x53, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x4f, 0x53, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0x35, 0x0a, 0x05, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, - 0x16, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x22, 0xfa, 0x01, - 0x0a, 0x06, 0x45, 0x78, 0x65, 0x63, 0x4f, 0x70, 0x12, 0x1c, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x74, 0x61, - 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x06, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, - 0x74, 0x52, 0x06, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x07, 0x6e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, - 0x4e, 0x65, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x12, 0x2c, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, - 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x2b, - 0x0a, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x76, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x45, 0x6e, 0x76, - 0x52, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x76, 0x12, 0x2d, 0x0a, 0x0a, 0x63, - 0x64, 0x69, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x44, 0x49, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x52, 0x0a, - 0x63, 0x64, 0x69, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x22, 0xf3, 0x02, 0x0a, 0x04, 0x4d, - 0x65, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x01, 
0x20, 0x03, 0x28, - 0x09, 0x52, 0x04, 0x61, 0x72, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x77, 0x64, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x63, 0x77, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x75, - 0x73, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, - 0x29, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x65, 0x6e, 0x76, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x76, - 0x52, 0x08, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x76, 0x12, 0x2a, 0x0a, 0x0a, 0x65, 0x78, - 0x74, 0x72, 0x61, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, - 0x2e, 0x70, 0x62, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x49, 0x50, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x72, - 0x61, 0x48, 0x6f, 0x73, 0x74, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x06, 0x75, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x09, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x55, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x52, 0x06, - 0x75, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x22, 0x0a, 0x0c, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, - 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x63, 0x67, - 0x72, 0x6f, 0x75, 0x70, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x19, 0x72, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x74, 0x75, 0x62, 0x73, 0x52, 0x65, - 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x72, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x53, 0x74, 0x75, 0x62, 0x73, 0x52, - 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x26, 0x0a, 0x0e, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x45, 0x78, 0x69, 0x74, 0x43, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x05, - 0x52, 0x0e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x45, 0x78, 0x69, 0x74, 0x43, 0x6f, 0x64, 0x65, 0x73, - 0x22, 0x2c, 0x0a, 0x06, 0x48, 0x6f, 0x73, 0x74, 0x49, 0x50, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x6f, - 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x0e, - 0x0a, 0x02, 0x49, 0x50, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x50, 0x22, 0x44, - 0x0a, 0x06, 0x55, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x53, 0x6f, 0x66, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x53, 0x6f, 0x66, 0x74, - 0x12, 0x12, 0x0a, 0x04, 0x48, 0x61, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, - 0x48, 0x61, 0x72, 0x64, 0x22, 0x4b, 0x0a, 0x09, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, 0x45, 0x6e, - 0x76, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, - 0x44, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x22, 0x3b, 0x0a, 0x09, 0x43, 0x44, 0x49, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 
0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xaa, - 0x03, 0x0a, 0x05, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x1a, - 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x65, - 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x65, 0x73, 0x74, 0x12, 0x16, - 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, - 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x61, 0x64, 0x6f, 0x6e, - 0x6c, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x6f, 0x6e, - 0x6c, 0x79, 0x12, 0x2b, 0x0a, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, 0x74, - 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x28, 0x0a, 0x08, 0x54, 0x6d, 0x70, 0x66, 0x73, 0x4f, 0x70, 0x74, 0x18, 0x13, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x54, 0x6d, 0x70, 0x66, 0x73, 0x4f, 0x70, 0x74, 0x52, - 0x08, 0x54, 0x6d, 0x70, 0x66, 0x73, 0x4f, 0x70, 0x74, 0x12, 0x28, 0x0a, 0x08, 0x63, 0x61, 0x63, - 0x68, 0x65, 0x4f, 0x70, 0x74, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, - 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x52, 0x08, 0x63, 0x61, 0x63, 0x68, 0x65, - 0x4f, 0x70, 0x74, 0x12, 0x2b, 0x0a, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4f, 0x70, 0x74, - 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x65, 0x63, 0x72, - 0x65, 0x74, 0x4f, 0x70, 0x74, 0x52, 0x09, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x4f, 0x70, 0x74, - 0x12, 0x22, 0x0a, 0x06, 0x53, 0x53, 0x48, 0x4f, 0x70, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x53, 0x48, 0x4f, 0x70, 0x74, 0x52, 0x06, 0x53, 0x53, - 0x48, 0x4f, 0x70, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x49, 0x44, - 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x49, 0x44, - 0x12, 0x39, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, - 0x18, 0x18, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x6f, 0x75, 0x6e, - 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x0c, 0x63, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x22, 0x1e, 0x0a, 0x08, 0x54, - 0x6d, 0x70, 0x66, 0x73, 0x4f, 0x70, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x22, 0x49, 0x0a, 0x08, 0x43, - 0x61, 0x63, 0x68, 0x65, 0x4f, 0x70, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x2d, 0x0a, 0x07, 0x73, 0x68, 0x61, 0x72, 0x69, - 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x61, - 0x63, 0x68, 0x65, 0x53, 0x68, 0x61, 0x72, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x52, 0x07, 0x73, - 0x68, 0x61, 0x72, 0x69, 0x6e, 0x67, 0x22, 0x6f, 0x0a, 0x09, 0x53, 0x65, 0x63, 0x72, 0x65, 0x74, - 0x4f, 0x70, 0x74, 0x12, 0x0e, 
0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x49, 0x44, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x03, 0x75, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x03, 0x67, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0x6c, 0x0a, 0x06, 0x53, 0x53, 0x48, 0x4f, 0x70, - 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, - 0x44, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, - 0x75, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x67, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x03, 0x67, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0x93, 0x01, 0x0a, 0x08, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x4f, 0x70, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x12, 0x2d, 0x0a, 0x05, 0x61, 0x74, 0x74, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4f, 0x70, 0x2e, - 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x61, 0x74, 0x74, 0x72, - 0x73, 0x1a, 0x38, 0x0a, 0x0a, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xa9, 0x02, 0x0a, 0x07, - 0x42, 0x75, 0x69, 0x6c, 0x64, 0x4f, 0x70, 0x12, 0x18, 0x0a, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x65, - 0x72, 0x12, 0x2f, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x4f, 0x70, 0x2e, 0x49, - 0x6e, 0x70, 0x75, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, - 0x74, 0x73, 0x12, 0x20, 0x0a, 0x03, 0x64, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, - 0x03, 0x64, 0x65, 0x66, 0x12, 0x2c, 0x0a, 0x05, 0x61, 0x74, 0x74, 0x72, 0x73, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x62, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x4f, 0x70, - 0x2e, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x61, 0x74, 0x74, - 0x72, 0x73, 0x1a, 0x49, 0x0a, 0x0b, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x42, 
0x75, 0x69, 0x6c, 0x64, 0x49, 0x6e, 0x70, - 0x75, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x38, 0x0a, - 0x0a, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x75, 0x69, 0x6c, 0x64, - 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x87, 0x03, 0x0a, 0x0a, - 0x4f, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x67, - 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0b, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x41, 0x0a, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x70, 0x62, 0x2e, 0x4f, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x32, 0x0a, 0x0c, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x5f, 0x63, 0x61, 0x63, 0x68, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x70, 0x62, 0x2e, 0x45, 0x78, 0x70, 0x6f, - 0x72, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x0b, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x43, - 0x61, 0x63, 0x68, 0x65, 0x12, 0x2c, 0x0a, 0x04, 0x63, 0x61, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x62, 0x2e, 0x4f, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x43, 0x61, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x63, 0x61, - 0x70, 0x73, 0x12, 0x38, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x5f, 0x67, - 0x72, 0x6f, 0x75, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x70, 0x62, 0x2e, - 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x0d, 0x70, - 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x1a, 0x3e, 0x0a, 0x10, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x37, 0x0a, 0x09, - 0x43, 0x61, 0x70, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb4, 0x01, 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x12, 0x37, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, - 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, - 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x24, 0x0a, 0x05, 
0x69, 0x6e, 0x66, - 0x6f, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x05, 0x69, 0x6e, 0x66, 0x6f, 0x73, 0x1a, - 0x4b, 0x0a, 0x0e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x23, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x37, 0x0a, 0x09, - 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2a, 0x0a, 0x09, 0x6c, 0x6f, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, - 0x62, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x88, 0x01, 0x0a, 0x0a, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x2e, 0x0a, 0x0a, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x65, - 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, - 0x22, 0x4f, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x21, - 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x09, - 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x67, 0x65, - 0x73, 0x22, 0x4b, 0x0a, 0x05, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x22, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x50, - 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x1e, - 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, - 0x2e, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x3c, - 0x0a, 0x08, 0x50, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6c, 0x69, - 0x6e, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x1c, - 0x0a, 0x09, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x09, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, 0x74, 0x65, 0x72, 0x22, 0x23, 0x0a, 0x0b, - 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x22, 0x47, 0x0a, 0x0d, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x47, 0x72, 0x6f, - 0x75, 
0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x22, 0x9f, 0x01, 0x0a, 0x08, 0x50, - 0x72, 0x6f, 0x78, 0x79, 0x45, 0x6e, 0x76, 0x12, 0x1d, 0x0a, 0x0a, 0x68, 0x74, 0x74, 0x70, 0x5f, - 0x70, 0x72, 0x6f, 0x78, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x68, 0x74, 0x74, - 0x70, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x74, 0x74, 0x70, 0x73, 0x5f, - 0x70, 0x72, 0x6f, 0x78, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x74, 0x74, - 0x70, 0x73, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x74, 0x70, 0x5f, 0x70, - 0x72, 0x6f, 0x78, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x74, 0x70, 0x50, - 0x72, 0x6f, 0x78, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6e, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x6f, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x12, - 0x1b, 0x0a, 0x09, 0x61, 0x6c, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x61, 0x6c, 0x6c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x22, 0x2b, 0x0a, 0x11, - 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, - 0x73, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xc9, 0x01, 0x0a, 0x0a, 0x44, 0x65, - 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x64, 0x65, 0x66, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x03, 0x64, 0x65, 0x66, 0x12, 0x38, 0x0a, 0x08, 0x6d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, - 0x62, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x70, 0x62, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x52, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x4b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x24, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, - 0x4f, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x32, 0x0a, 0x06, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x12, - 0x28, 0x0a, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xca, 0x02, 0x0a, 0x0a, 0x46, 0x69, - 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x26, - 0x0a, 0x0e, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, 0x79, 0x49, 0x6e, 0x70, 0x75, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 
0x03, 0x52, 0x0e, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x61, 0x72, - 0x79, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x28, - 0x0a, 0x04, 0x63, 0x6f, 0x70, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, - 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x70, 0x79, - 0x48, 0x00, 0x52, 0x04, 0x63, 0x6f, 0x70, 0x79, 0x12, 0x2e, 0x0a, 0x06, 0x6d, 0x6b, 0x66, 0x69, - 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x69, - 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6b, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x00, - 0x52, 0x06, 0x6d, 0x6b, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x6d, 0x6b, 0x64, 0x69, - 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, - 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6b, 0x44, 0x69, 0x72, 0x48, 0x00, 0x52, 0x05, - 0x6d, 0x6b, 0x64, 0x69, 0x72, 0x12, 0x22, 0x0a, 0x02, 0x72, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x52, 0x6d, 0x48, 0x00, 0x52, 0x02, 0x72, 0x6d, 0x12, 0x31, 0x0a, 0x07, 0x73, 0x79, 0x6d, - 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x62, 0x2e, - 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6d, 0x6c, 0x69, 0x6e, - 0x6b, 0x48, 0x00, 0x52, 0x07, 0x73, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x42, 0x08, 0x0a, 0x06, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xde, 0x04, 0x0a, 0x0e, 0x46, 0x69, 0x6c, 0x65, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x70, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x73, 0x72, 0x63, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x73, 0x72, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x64, - 0x65, 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x65, 0x73, 0x74, 0x12, - 0x22, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, - 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x68, 0x6f, 0x77, 0x6e, 0x4f, 0x70, 0x74, 0x52, 0x05, 0x6f, 0x77, - 0x6e, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, - 0x77, 0x53, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, - 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x53, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, 0x12, 0x28, 0x0a, - 0x0f, 0x64, 0x69, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x64, 0x69, 0x72, 0x43, 0x6f, 0x70, 0x79, 0x43, - 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x4a, 0x0a, 0x20, 0x61, 0x74, 0x74, 0x65, 0x6d, - 0x70, 0x74, 0x55, 0x6e, 0x70, 0x61, 0x63, 0x6b, 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, - 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x20, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x55, 0x6e, 0x70, 0x61, 0x63, 0x6b, - 0x44, 0x6f, 0x63, 0x6b, 0x65, 0x72, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x69, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x79, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x65, 0x73, - 0x74, 0x50, 0x61, 0x74, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x44, 0x65, 0x73, 0x74, 0x50, 0x61, 
0x74, 0x68, 0x12, 0x24, 0x0a, 0x0d, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x57, 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, 0x64, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x57, 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, - 0x64, 0x12, 0x2e, 0x0a, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x57, - 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x57, 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, - 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, - 0x29, 0x0a, 0x10, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, - 0x72, 0x6e, 0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x6e, 0x63, 0x6c, 0x75, - 0x64, 0x65, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x78, - 0x63, 0x6c, 0x75, 0x64, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x73, 0x18, 0x0d, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x50, 0x61, 0x74, - 0x74, 0x65, 0x72, 0x6e, 0x73, 0x12, 0x46, 0x0a, 0x1e, 0x61, 0x6c, 0x77, 0x61, 0x79, 0x73, 0x52, - 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x45, 0x78, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x44, 0x65, - 0x73, 0x74, 0x50, 0x61, 0x74, 0x68, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1e, 0x61, - 0x6c, 0x77, 0x61, 0x79, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x45, 0x78, 0x69, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x73, 0x74, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x18, 0x0a, - 0x07, 0x6d, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x6d, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x72, 0x22, 0x90, 0x01, 0x0a, 0x10, 0x46, 0x69, 0x6c, 0x65, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6b, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, - 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x22, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, - 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x68, 0x6f, - 0x77, 0x6e, 0x4f, 0x70, 0x74, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x89, 0x01, 0x0a, 0x11, 0x46, - 0x69, 0x6c, 0x65, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6d, 0x6c, 0x69, 0x6e, 0x6b, - 0x12, 0x18, 0x0a, 0x07, 0x6f, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x6f, 0x6c, 0x64, 0x70, 0x61, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, - 0x77, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x77, - 0x70, 0x61, 0x74, 0x68, 0x12, 0x22, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x68, 0x6f, 0x77, 0x6e, 0x4f, 0x70, - 0x74, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 
0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x9d, 0x01, 0x0a, 0x0f, 0x46, 0x69, 0x6c, 0x65, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x6b, 0x44, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, - 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x12, - 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x6d, 0x6f, - 0x64, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x6d, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6d, 0x61, 0x6b, 0x65, 0x50, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x22, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x43, 0x68, 0x6f, 0x77, 0x6e, 0x4f, 0x70, - 0x74, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x6e, 0x0a, 0x0c, 0x46, 0x69, 0x6c, 0x65, 0x41, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x24, 0x0a, 0x0d, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, - 0x12, 0x24, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x57, 0x69, 0x6c, 0x64, 0x63, 0x61, 0x72, - 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x57, 0x69, - 0x6c, 0x64, 0x63, 0x61, 0x72, 0x64, 0x22, 0x4e, 0x0a, 0x08, 0x43, 0x68, 0x6f, 0x77, 0x6e, 0x4f, - 0x70, 0x74, 0x12, 0x1f, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x52, 0x04, 0x75, - 0x73, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x70, 0x62, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x52, - 0x05, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x22, 0x53, 0x0a, 0x07, 0x55, 0x73, 0x65, 0x72, 0x4f, 0x70, - 0x74, 0x12, 0x2a, 0x0a, 0x06, 0x62, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, - 0x4f, 0x70, 0x74, 0x48, 0x00, 0x52, 0x06, 0x62, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, - 0x04, 0x62, 0x79, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x04, 0x62, - 0x79, 0x49, 0x44, 0x42, 0x06, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x22, 0x38, 0x0a, 0x0c, 0x4e, - 0x61, 0x6d, 0x65, 0x64, 0x55, 0x73, 0x65, 0x72, 0x4f, 0x70, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, - 0x69, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x22, 0x0a, 0x0a, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x49, 0x6e, - 0x70, 0x75, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x31, 0x0a, 0x07, 0x4d, 0x65, 0x72, - 0x67, 0x65, 0x4f, 0x70, 0x12, 0x26, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x49, - 0x6e, 
0x70, 0x75, 0x74, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x22, 0x26, 0x0a, 0x0e, - 0x4c, 0x6f, 0x77, 0x65, 0x72, 0x44, 0x69, 0x66, 0x66, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x14, - 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, - 0x6e, 0x70, 0x75, 0x74, 0x22, 0x26, 0x0a, 0x0e, 0x55, 0x70, 0x70, 0x65, 0x72, 0x44, 0x69, 0x66, - 0x66, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x22, 0x5c, 0x0a, 0x06, - 0x44, 0x69, 0x66, 0x66, 0x4f, 0x70, 0x12, 0x28, 0x0a, 0x05, 0x6c, 0x6f, 0x77, 0x65, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x77, 0x65, 0x72, - 0x44, 0x69, 0x66, 0x66, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x52, 0x05, 0x6c, 0x6f, 0x77, 0x65, 0x72, - 0x12, 0x28, 0x0a, 0x05, 0x75, 0x70, 0x70, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x55, 0x70, 0x70, 0x65, 0x72, 0x44, 0x69, 0x66, 0x66, 0x49, 0x6e, - 0x70, 0x75, 0x74, 0x52, 0x05, 0x75, 0x70, 0x70, 0x65, 0x72, 0x2a, 0x28, 0x0a, 0x07, 0x4e, 0x65, - 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x00, - 0x12, 0x08, 0x0a, 0x04, 0x48, 0x4f, 0x53, 0x54, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, - 0x4e, 0x45, 0x10, 0x02, 0x2a, 0x29, 0x0a, 0x0c, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, - 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x41, 0x4e, 0x44, 0x42, 0x4f, 0x58, 0x10, - 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x53, 0x45, 0x43, 0x55, 0x52, 0x45, 0x10, 0x01, 0x2a, - 0x40, 0x0a, 0x09, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, - 0x42, 0x49, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x45, 0x43, 0x52, 0x45, 0x54, - 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x53, 0x48, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05, 0x43, - 0x41, 0x43, 0x48, 0x45, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x4d, 0x50, 0x46, 0x53, 0x10, - 0x04, 0x2a, 0x31, 0x0a, 0x11, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x6e, - 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, - 0x54, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x4f, - 0x46, 0x46, 0x10, 0x02, 0x2a, 0x36, 0x0a, 0x0f, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x68, 0x61, - 0x72, 0x69, 0x6e, 0x67, 0x4f, 0x70, 0x74, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x48, 0x41, 0x52, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x49, 0x56, 0x41, 0x54, 0x45, 0x10, 0x01, - 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x4f, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x02, 0x42, 0x24, 0x5a, 0x22, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, 0x2f, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x6f, 0x6c, 0x76, 0x65, 0x72, 0x2f, - 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_github_com_moby_buildkit_solver_pb_ops_proto_rawDesc = "" + + "\n" + + ",github.com/moby/buildkit/solver/pb/ops.proto\x12\x02pb\"\xe8\x02\n" + + "\x02Op\x12!\n" + + "\x06inputs\x18\x01 \x03(\v2\t.pb.InputR\x06inputs\x12 \n" + + "\x04exec\x18\x02 \x01(\v2\n" + + ".pb.ExecOpH\x00R\x04exec\x12&\n" + + "\x06source\x18\x03 \x01(\v2\f.pb.SourceOpH\x00R\x06source\x12 \n" + + "\x04file\x18\x04 \x01(\v2\n" + + ".pb.FileOpH\x00R\x04file\x12#\n" + + "\x05build\x18\x05 \x01(\v2\v.pb.BuildOpH\x00R\x05build\x12#\n" + + "\x05merge\x18\x06 
\x01(\v2\v.pb.MergeOpH\x00R\x05merge\x12 \n" + + "\x04diff\x18\a \x01(\v2\n" + + ".pb.DiffOpH\x00R\x04diff\x12(\n" + + "\bplatform\x18\n" + + " \x01(\v2\f.pb.PlatformR\bplatform\x127\n" + + "\vconstraints\x18\v \x01(\v2\x15.pb.WorkerConstraintsR\vconstraintsB\x04\n" + + "\x02op\"\x96\x01\n" + + "\bPlatform\x12\"\n" + + "\fArchitecture\x18\x01 \x01(\tR\fArchitecture\x12\x0e\n" + + "\x02OS\x18\x02 \x01(\tR\x02OS\x12\x18\n" + + "\aVariant\x18\x03 \x01(\tR\aVariant\x12\x1c\n" + + "\tOSVersion\x18\x04 \x01(\tR\tOSVersion\x12\x1e\n" + + "\n" + + "OSFeatures\x18\x05 \x03(\tR\n" + + "OSFeatures\"5\n" + + "\x05Input\x12\x16\n" + + "\x06digest\x18\x01 \x01(\tR\x06digest\x12\x14\n" + + "\x05index\x18\x02 \x01(\x03R\x05index\"\xfa\x01\n" + + "\x06ExecOp\x12\x1c\n" + + "\x04meta\x18\x01 \x01(\v2\b.pb.MetaR\x04meta\x12!\n" + + "\x06mounts\x18\x02 \x03(\v2\t.pb.MountR\x06mounts\x12%\n" + + "\anetwork\x18\x03 \x01(\x0e2\v.pb.NetModeR\anetwork\x12,\n" + + "\bsecurity\x18\x04 \x01(\x0e2\x10.pb.SecurityModeR\bsecurity\x12+\n" + + "\tsecretenv\x18\x05 \x03(\v2\r.pb.SecretEnvR\tsecretenv\x12-\n" + + "\n" + + "cdiDevices\x18\x06 \x03(\v2\r.pb.CDIDeviceR\n" + + "cdiDevices\"\xf3\x02\n" + + "\x04Meta\x12\x12\n" + + "\x04args\x18\x01 \x03(\tR\x04args\x12\x10\n" + + "\x03env\x18\x02 \x03(\tR\x03env\x12\x10\n" + + "\x03cwd\x18\x03 \x01(\tR\x03cwd\x12\x12\n" + + "\x04user\x18\x04 \x01(\tR\x04user\x12)\n" + + "\tproxy_env\x18\x05 \x01(\v2\f.pb.ProxyEnvR\bproxyEnv\x12*\n" + + "\n" + + "extraHosts\x18\x06 \x03(\v2\n" + + ".pb.HostIPR\n" + + "extraHosts\x12\x1a\n" + + "\bhostname\x18\a \x01(\tR\bhostname\x12\"\n" + + "\x06ulimit\x18\t \x03(\v2\n" + + ".pb.UlimitR\x06ulimit\x12\"\n" + + "\fcgroupParent\x18\n" + + " \x01(\tR\fcgroupParent\x12<\n" + + "\x19removeMountStubsRecursive\x18\v \x01(\bR\x19removeMountStubsRecursive\x12&\n" + + "\x0evalidExitCodes\x18\f \x03(\x05R\x0evalidExitCodes\",\n" + + "\x06HostIP\x12\x12\n" + + "\x04Host\x18\x01 \x01(\tR\x04Host\x12\x0e\n" + + "\x02IP\x18\x02 \x01(\tR\x02IP\"D\n" + + "\x06Ulimit\x12\x12\n" + + "\x04Name\x18\x01 \x01(\tR\x04Name\x12\x12\n" + + "\x04Soft\x18\x02 \x01(\x03R\x04Soft\x12\x12\n" + + "\x04Hard\x18\x03 \x01(\x03R\x04Hard\"K\n" + + "\tSecretEnv\x12\x0e\n" + + "\x02ID\x18\x01 \x01(\tR\x02ID\x12\x12\n" + + "\x04name\x18\x02 \x01(\tR\x04name\x12\x1a\n" + + "\boptional\x18\x03 \x01(\bR\boptional\";\n" + + "\tCDIDevice\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x1a\n" + + "\boptional\x18\x02 \x01(\bR\boptional\"\xaa\x03\n" + + "\x05Mount\x12\x14\n" + + "\x05input\x18\x01 \x01(\x03R\x05input\x12\x1a\n" + + "\bselector\x18\x02 \x01(\tR\bselector\x12\x12\n" + + "\x04dest\x18\x03 \x01(\tR\x04dest\x12\x16\n" + + "\x06output\x18\x04 \x01(\x03R\x06output\x12\x1a\n" + + "\breadonly\x18\x05 \x01(\bR\breadonly\x12+\n" + + "\tmountType\x18\x06 \x01(\x0e2\r.pb.MountTypeR\tmountType\x12(\n" + + "\bTmpfsOpt\x18\x13 \x01(\v2\f.pb.TmpfsOptR\bTmpfsOpt\x12(\n" + + "\bcacheOpt\x18\x14 \x01(\v2\f.pb.CacheOptR\bcacheOpt\x12+\n" + + "\tsecretOpt\x18\x15 \x01(\v2\r.pb.SecretOptR\tsecretOpt\x12\"\n" + + "\x06SSHOpt\x18\x16 \x01(\v2\n" + + ".pb.SSHOptR\x06SSHOpt\x12\x1a\n" + + "\bresultID\x18\x17 \x01(\tR\bresultID\x129\n" + + "\fcontentCache\x18\x18 \x01(\x0e2\x15.pb.MountContentCacheR\fcontentCache\"\x1e\n" + + "\bTmpfsOpt\x12\x12\n" + + "\x04size\x18\x01 \x01(\x03R\x04size\"I\n" + + "\bCacheOpt\x12\x0e\n" + + "\x02ID\x18\x01 \x01(\tR\x02ID\x12-\n" + + "\asharing\x18\x02 \x01(\x0e2\x13.pb.CacheSharingOptR\asharing\"o\n" + + "\tSecretOpt\x12\x0e\n" + + "\x02ID\x18\x01 
\x01(\tR\x02ID\x12\x10\n" + + "\x03uid\x18\x02 \x01(\rR\x03uid\x12\x10\n" + + "\x03gid\x18\x03 \x01(\rR\x03gid\x12\x12\n" + + "\x04mode\x18\x04 \x01(\rR\x04mode\x12\x1a\n" + + "\boptional\x18\x05 \x01(\bR\boptional\"l\n" + + "\x06SSHOpt\x12\x0e\n" + + "\x02ID\x18\x01 \x01(\tR\x02ID\x12\x10\n" + + "\x03uid\x18\x02 \x01(\rR\x03uid\x12\x10\n" + + "\x03gid\x18\x03 \x01(\rR\x03gid\x12\x12\n" + + "\x04mode\x18\x04 \x01(\rR\x04mode\x12\x1a\n" + + "\boptional\x18\x05 \x01(\bR\boptional\"\x93\x01\n" + + "\bSourceOp\x12\x1e\n" + + "\n" + + "identifier\x18\x01 \x01(\tR\n" + + "identifier\x12-\n" + + "\x05attrs\x18\x02 \x03(\v2\x17.pb.SourceOp.AttrsEntryR\x05attrs\x1a8\n" + + "\n" + + "AttrsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xa9\x02\n" + + "\aBuildOp\x12\x18\n" + + "\abuilder\x18\x01 \x01(\x03R\abuilder\x12/\n" + + "\x06inputs\x18\x02 \x03(\v2\x17.pb.BuildOp.InputsEntryR\x06inputs\x12 \n" + + "\x03def\x18\x03 \x01(\v2\x0e.pb.DefinitionR\x03def\x12,\n" + + "\x05attrs\x18\x04 \x03(\v2\x16.pb.BuildOp.AttrsEntryR\x05attrs\x1aI\n" + + "\vInputsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12$\n" + + "\x05value\x18\x02 \x01(\v2\x0e.pb.BuildInputR\x05value:\x028\x01\x1a8\n" + + "\n" + + "AttrsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\"\n" + + "\n" + + "BuildInput\x12\x14\n" + + "\x05input\x18\x01 \x01(\x03R\x05input\"\x87\x03\n" + + "\n" + + "OpMetadata\x12!\n" + + "\fignore_cache\x18\x01 \x01(\bR\vignoreCache\x12A\n" + + "\vdescription\x18\x02 \x03(\v2\x1f.pb.OpMetadata.DescriptionEntryR\vdescription\x122\n" + + "\fexport_cache\x18\x04 \x01(\v2\x0f.pb.ExportCacheR\vexportCache\x12,\n" + + "\x04caps\x18\x05 \x03(\v2\x18.pb.OpMetadata.CapsEntryR\x04caps\x128\n" + + "\x0eprogress_group\x18\x06 \x01(\v2\x11.pb.ProgressGroupR\rprogressGroup\x1a>\n" + + "\x10DescriptionEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\x1a7\n" + + "\tCapsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\bR\x05value:\x028\x01\"\xb4\x01\n" + + "\x06Source\x127\n" + + "\tlocations\x18\x01 \x03(\v2\x19.pb.Source.LocationsEntryR\tlocations\x12$\n" + + "\x05infos\x18\x02 \x03(\v2\x0e.pb.SourceInfoR\x05infos\x1aK\n" + + "\x0eLocationsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12#\n" + + "\x05value\x18\x02 \x01(\v2\r.pb.LocationsR\x05value:\x028\x01\"7\n" + + "\tLocations\x12*\n" + + "\tlocations\x18\x01 \x03(\v2\f.pb.LocationR\tlocations\"\x88\x01\n" + + "\n" + + "SourceInfo\x12\x1a\n" + + "\bfilename\x18\x01 \x01(\tR\bfilename\x12\x12\n" + + "\x04data\x18\x02 \x01(\fR\x04data\x12.\n" + + "\n" + + "definition\x18\x03 \x01(\v2\x0e.pb.DefinitionR\n" + + "definition\x12\x1a\n" + + "\blanguage\x18\x04 \x01(\tR\blanguage\"O\n" + + "\bLocation\x12 \n" + + "\vsourceIndex\x18\x01 \x01(\x05R\vsourceIndex\x12!\n" + + "\x06ranges\x18\x02 \x03(\v2\t.pb.RangeR\x06ranges\"K\n" + + "\x05Range\x12\"\n" + + "\x05start\x18\x01 \x01(\v2\f.pb.PositionR\x05start\x12\x1e\n" + + "\x03end\x18\x02 \x01(\v2\f.pb.PositionR\x03end\"<\n" + + "\bPosition\x12\x12\n" + + "\x04line\x18\x01 \x01(\x05R\x04line\x12\x1c\n" + + "\tcharacter\x18\x02 \x01(\x05R\tcharacter\"#\n" + + "\vExportCache\x12\x14\n" + + "\x05Value\x18\x01 \x01(\bR\x05Value\"G\n" + + "\rProgressGroup\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + + "\x04name\x18\x02 \x01(\tR\x04name\x12\x12\n" + + 
"\x04weak\x18\x03 \x01(\bR\x04weak\"\x9f\x01\n" + + "\bProxyEnv\x12\x1d\n" + + "\n" + + "http_proxy\x18\x01 \x01(\tR\thttpProxy\x12\x1f\n" + + "\vhttps_proxy\x18\x02 \x01(\tR\n" + + "httpsProxy\x12\x1b\n" + + "\tftp_proxy\x18\x03 \x01(\tR\bftpProxy\x12\x19\n" + + "\bno_proxy\x18\x04 \x01(\tR\anoProxy\x12\x1b\n" + + "\tall_proxy\x18\x05 \x01(\tR\ballProxy\"+\n" + + "\x11WorkerConstraints\x12\x16\n" + + "\x06filter\x18\x01 \x03(\tR\x06filter\"\xc9\x01\n" + + "\n" + + "Definition\x12\x10\n" + + "\x03def\x18\x01 \x03(\fR\x03def\x128\n" + + "\bmetadata\x18\x02 \x03(\v2\x1c.pb.Definition.MetadataEntryR\bmetadata\x12\"\n" + + "\x06Source\x18\x03 \x01(\v2\n" + + ".pb.SourceR\x06Source\x1aK\n" + + "\rMetadataEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12$\n" + + "\x05value\x18\x02 \x01(\v2\x0e.pb.OpMetadataR\x05value:\x028\x01\"2\n" + + "\x06FileOp\x12(\n" + + "\aactions\x18\x02 \x03(\v2\x0e.pb.FileActionR\aactions\"\xca\x02\n" + + "\n" + + "FileAction\x12\x14\n" + + "\x05input\x18\x01 \x01(\x03R\x05input\x12&\n" + + "\x0esecondaryInput\x18\x02 \x01(\x03R\x0esecondaryInput\x12\x16\n" + + "\x06output\x18\x03 \x01(\x03R\x06output\x12(\n" + + "\x04copy\x18\x04 \x01(\v2\x12.pb.FileActionCopyH\x00R\x04copy\x12.\n" + + "\x06mkfile\x18\x05 \x01(\v2\x14.pb.FileActionMkFileH\x00R\x06mkfile\x12+\n" + + "\x05mkdir\x18\x06 \x01(\v2\x13.pb.FileActionMkDirH\x00R\x05mkdir\x12\"\n" + + "\x02rm\x18\a \x01(\v2\x10.pb.FileActionRmH\x00R\x02rm\x121\n" + + "\asymlink\x18\b \x01(\v2\x15.pb.FileActionSymlinkH\x00R\asymlinkB\b\n" + + "\x06action\"\xde\x04\n" + + "\x0eFileActionCopy\x12\x10\n" + + "\x03src\x18\x01 \x01(\tR\x03src\x12\x12\n" + + "\x04dest\x18\x02 \x01(\tR\x04dest\x12\"\n" + + "\x05owner\x18\x03 \x01(\v2\f.pb.ChownOptR\x05owner\x12\x12\n" + + "\x04mode\x18\x04 \x01(\x05R\x04mode\x12$\n" + + "\rfollowSymlink\x18\x05 \x01(\bR\rfollowSymlink\x12(\n" + + "\x0fdirCopyContents\x18\x06 \x01(\bR\x0fdirCopyContents\x12J\n" + + " attemptUnpackDockerCompatibility\x18\a \x01(\bR attemptUnpackDockerCompatibility\x12&\n" + + "\x0ecreateDestPath\x18\b \x01(\bR\x0ecreateDestPath\x12$\n" + + "\rallowWildcard\x18\t \x01(\bR\rallowWildcard\x12.\n" + + "\x12allowEmptyWildcard\x18\n" + + " \x01(\bR\x12allowEmptyWildcard\x12\x1c\n" + + "\ttimestamp\x18\v \x01(\x03R\ttimestamp\x12)\n" + + "\x10include_patterns\x18\f \x03(\tR\x0fincludePatterns\x12)\n" + + "\x10exclude_patterns\x18\r \x03(\tR\x0fexcludePatterns\x12F\n" + + "\x1ealwaysReplaceExistingDestPaths\x18\x0e \x01(\bR\x1ealwaysReplaceExistingDestPaths\x12\x18\n" + + "\amodeStr\x18\x0f \x01(\tR\amodeStr\"\x90\x01\n" + + "\x10FileActionMkFile\x12\x12\n" + + "\x04path\x18\x01 \x01(\tR\x04path\x12\x12\n" + + "\x04mode\x18\x02 \x01(\x05R\x04mode\x12\x12\n" + + "\x04data\x18\x03 \x01(\fR\x04data\x12\"\n" + + "\x05owner\x18\x04 \x01(\v2\f.pb.ChownOptR\x05owner\x12\x1c\n" + + "\ttimestamp\x18\x05 \x01(\x03R\ttimestamp\"\x89\x01\n" + + "\x11FileActionSymlink\x12\x18\n" + + "\aoldpath\x18\x01 \x01(\tR\aoldpath\x12\x18\n" + + "\anewpath\x18\x02 \x01(\tR\anewpath\x12\"\n" + + "\x05owner\x18\x03 \x01(\v2\f.pb.ChownOptR\x05owner\x12\x1c\n" + + "\ttimestamp\x18\x04 \x01(\x03R\ttimestamp\"\x9d\x01\n" + + "\x0fFileActionMkDir\x12\x12\n" + + "\x04path\x18\x01 \x01(\tR\x04path\x12\x12\n" + + "\x04mode\x18\x02 \x01(\x05R\x04mode\x12 \n" + + "\vmakeParents\x18\x03 \x01(\bR\vmakeParents\x12\"\n" + + "\x05owner\x18\x04 \x01(\v2\f.pb.ChownOptR\x05owner\x12\x1c\n" + + "\ttimestamp\x18\x05 \x01(\x03R\ttimestamp\"n\n" + + "\fFileActionRm\x12\x12\n" + + "\x04path\x18\x01 
\x01(\tR\x04path\x12$\n" + + "\rallowNotFound\x18\x02 \x01(\bR\rallowNotFound\x12$\n" + + "\rallowWildcard\x18\x03 \x01(\bR\rallowWildcard\"N\n" + + "\bChownOpt\x12\x1f\n" + + "\x04user\x18\x01 \x01(\v2\v.pb.UserOptR\x04user\x12!\n" + + "\x05group\x18\x02 \x01(\v2\v.pb.UserOptR\x05group\"S\n" + + "\aUserOpt\x12*\n" + + "\x06byName\x18\x01 \x01(\v2\x10.pb.NamedUserOptH\x00R\x06byName\x12\x14\n" + + "\x04byID\x18\x02 \x01(\rH\x00R\x04byIDB\x06\n" + + "\x04user\"8\n" + + "\fNamedUserOpt\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x14\n" + + "\x05input\x18\x02 \x01(\x03R\x05input\"\"\n" + + "\n" + + "MergeInput\x12\x14\n" + + "\x05input\x18\x01 \x01(\x03R\x05input\"1\n" + + "\aMergeOp\x12&\n" + + "\x06inputs\x18\x01 \x03(\v2\x0e.pb.MergeInputR\x06inputs\"&\n" + + "\x0eLowerDiffInput\x12\x14\n" + + "\x05input\x18\x01 \x01(\x03R\x05input\"&\n" + + "\x0eUpperDiffInput\x12\x14\n" + + "\x05input\x18\x01 \x01(\x03R\x05input\"\\\n" + + "\x06DiffOp\x12(\n" + + "\x05lower\x18\x01 \x01(\v2\x12.pb.LowerDiffInputR\x05lower\x12(\n" + + "\x05upper\x18\x02 \x01(\v2\x12.pb.UpperDiffInputR\x05upper*(\n" + + "\aNetMode\x12\t\n" + + "\x05UNSET\x10\x00\x12\b\n" + + "\x04HOST\x10\x01\x12\b\n" + + "\x04NONE\x10\x02*)\n" + + "\fSecurityMode\x12\v\n" + + "\aSANDBOX\x10\x00\x12\f\n" + + "\bINSECURE\x10\x01*@\n" + + "\tMountType\x12\b\n" + + "\x04BIND\x10\x00\x12\n" + + "\n" + + "\x06SECRET\x10\x01\x12\a\n" + + "\x03SSH\x10\x02\x12\t\n" + + "\x05CACHE\x10\x03\x12\t\n" + + "\x05TMPFS\x10\x04*1\n" + + "\x11MountContentCache\x12\v\n" + + "\aDEFAULT\x10\x00\x12\x06\n" + + "\x02ON\x10\x01\x12\a\n" + + "\x03OFF\x10\x02*6\n" + + "\x0fCacheSharingOpt\x12\n" + + "\n" + + "\x06SHARED\x10\x00\x12\v\n" + + "\aPRIVATE\x10\x01\x12\n" + + "\n" + + "\x06LOCKED\x10\x02B$Z\"github.com/moby/buildkit/solver/pbb\x06proto3" var ( file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescOnce sync.Once - file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescData = file_github_com_moby_buildkit_solver_pb_ops_proto_rawDesc + file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescData []byte ) func file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescGZIP() []byte { file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescOnce.Do(func() { - file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescData) + file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_solver_pb_ops_proto_rawDesc), len(file_github_com_moby_buildkit_solver_pb_ops_proto_rawDesc))) }) return file_github_com_moby_buildkit_solver_pb_ops_proto_rawDescData } @@ -3984,7 +3836,7 @@ func file_github_com_moby_buildkit_solver_pb_ops_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_moby_buildkit_solver_pb_ops_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_solver_pb_ops_proto_rawDesc), len(file_github_com_moby_buildkit_solver_pb_ops_proto_rawDesc)), NumEnums: 5, NumMessages: 51, NumExtensions: 0, @@ -3996,7 +3848,6 @@ func file_github_com_moby_buildkit_solver_pb_ops_proto_init() { MessageInfos: file_github_com_moby_buildkit_solver_pb_ops_proto_msgTypes, }.Build() File_github_com_moby_buildkit_solver_pb_ops_proto = out.File - file_github_com_moby_buildkit_solver_pb_ops_proto_rawDesc = nil 
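The regenerated ops.pb.go above replaces the package-level []byte raw descriptor (previously nil-ed out after registration) with a string constant, and both rawDescGZIP and the TypeBuilder now take a zero-copy byte view of that constant via unsafe.Slice(unsafe.StringData(...)). A minimal, self-contained sketch of the same conversion pattern follows; the short rawDesc value here is made up for illustration and is not the real generated descriptor:

package main

import (
	"fmt"
	"unsafe"
)

// rawDesc stands in for the generated file_..._rawDesc constant; the real
// value is the full serialized FileDescriptorProto.
const rawDesc = "\n,github.com/moby/buildkit/solver/pb/ops.proto\x12\x02pb"

// rawDescBytes mirrors what the generated code hands to the TypeBuilder and
// to protoimpl.X.CompressGZIP: a []byte that aliases the string's backing
// memory, so no copy is made and the bytes must never be modified.
func rawDescBytes() []byte {
	return unsafe.Slice(unsafe.StringData(rawDesc), len(rawDesc))
}

func main() {
	b := rawDescBytes()
	fmt.Println(len(b) == len(rawDesc), b[0] == rawDesc[0]) // true true
}

Because the descriptor is now an immutable constant rather than a mutable slice, there is nothing left to release after registration, which is presumably why the file_..._rawDesc = nil line disappears from init().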
 	file_github_com_moby_buildkit_solver_pb_ops_proto_goTypes = nil
 	file_github_com_moby_buildkit_solver_pb_ops_proto_depIdxs = nil
 }
diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/pb/platform.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/pb/platform.go
index b72bb4a1c..9030615cf 100644
--- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/pb/platform.go
+++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/pb/platform.go
@@ -1,6 +1,8 @@
 package pb
 
 import (
+	"slices"
+
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 )
 
@@ -12,7 +14,7 @@ func (p *Platform) Spec() ocispecs.Platform {
 		OSVersion: p.OSVersion,
 	}
 	if p.OSFeatures != nil {
-		result.OSFeatures = append([]string{}, p.OSFeatures...)
+		result.OSFeatures = slices.Clone(p.OSFeatures)
 	}
 	return result
 }
@@ -25,7 +27,7 @@ func PlatformFromSpec(p ocispecs.Platform) *Platform {
 		OSVersion: p.OSVersion,
 	}
 	if p.OSFeatures != nil {
-		result.OSFeatures = append([]string{}, p.OSFeatures...)
+		result.OSFeatures = slices.Clone(p.OSFeatures)
 	}
 	return result
 }
diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/result/attestation.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/result/attestation.go
index 2fee27824..de56e3c40 100644
--- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/result/attestation.go
+++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/solver/result/attestation.go
@@ -1,6 +1,8 @@
 package result
 
 import (
+	"context"
+
 	pb "github.com/moby/buildkit/frontend/gateway/pb"
 	digest "github.com/opencontainers/go-digest"
 )
@@ -23,7 +25,7 @@ type Attestation[T any] struct {
 	Ref  T
 	Path string
 
-	ContentFunc func() ([]byte, error)
+	ContentFunc func(context.Context) ([]byte, error)
 
 	InToto InTotoAttestation
 }
diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/sourcepolicy/pb/json.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/sourcepolicy/pb/json.go
index 9ca101eb7..4b61d0a6d 100644
--- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/sourcepolicy/pb/json.go
+++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/sourcepolicy/pb/json.go
@@ -1,4 +1,4 @@
-package moby_buildkit_v1_sourcepolicy //nolint:revive
+package moby_buildkit_v1_sourcepolicy //nolint:revive,staticcheck
 
 import (
 	"github.com/moby/buildkit/util/gogo/proto"
diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/sourcepolicy/pb/policy.pb.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/sourcepolicy/pb/policy.pb.go
index 7f88a3472..4ae587160 100644
--- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/sourcepolicy/pb/policy.pb.go
+++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/sourcepolicy/pb/policy.pb.go
@@ -1,6 +1,6 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
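The platform.go hunk just above, and the util/apicaps/caps.go hunk further down in this diff, both trade hand-rolled idioms for Go 1.21 standard-library helpers: append([]string{}, s...) becomes slices.Clone, and sort.Slice with an index-based closure becomes slices.SortFunc with cmp.Compare. A small stand-alone sketch of both; capLite is a made-up stand-in for *pb.APICap, not a type from the vendored code:

package main

import (
	"cmp"
	"fmt"
	"slices"
)

// capLite stands in for *pb.APICap from util/apicaps; only ID matters here.
type capLite struct{ ID string }

func main() {
	// slices.Clone vs append-copy: they differ only for nil input, and the
	// vendored code keeps the `if p.OSFeatures != nil` guard, so behavior is unchanged.
	var features []string                            // nil, like an unset OSFeatures
	fromAppend := append([]string{}, features...)    // empty but non-nil
	fromClone := slices.Clone(features)              // stays nil
	fmt.Println(fromAppend == nil, fromClone == nil) // false true

	// slices.SortFunc sorts with a typed three-way comparator instead of
	// sort.Slice's reflection-based index closure.
	caps := []*capLite{{ID: "c"}, {ID: "a"}, {ID: "b"}}
	slices.SortFunc(caps, func(a, b *capLite) int { return cmp.Compare(a.ID, b.ID) })
	for _, c := range caps {
		fmt.Print(c.ID, " ") // a b c
	}
	fmt.Println()
}

Both helpers need Go 1.21 or newer, which the updated buildkit module presumably already requires.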
// versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.11.4 // source: github.com/moby/buildkit/sourcepolicy/pb/policy.proto @@ -11,6 +11,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -178,13 +179,12 @@ func (MatchType) EnumDescriptor() ([]byte, []int) { // Rule defines the action(s) to take when a source is matched type Rule struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Action PolicyAction `protobuf:"varint,1,opt,name=action,proto3,enum=moby.buildkit.v1.sourcepolicy.PolicyAction" json:"action,omitempty"` + Selector *Selector `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` + Updates *Update `protobuf:"bytes,3,opt,name=updates,proto3" json:"updates,omitempty"` unknownFields protoimpl.UnknownFields - - Action PolicyAction `protobuf:"varint,1,opt,name=action,proto3,enum=moby.buildkit.v1.sourcepolicy.PolicyAction" json:"action,omitempty"` - Selector *Selector `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` - Updates *Update `protobuf:"bytes,3,opt,name=updates,proto3" json:"updates,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Rule) Reset() { @@ -240,12 +240,11 @@ func (x *Rule) GetUpdates() *Update { // Update contains updates to the matched build step after rule is applied type Update struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + Attrs map[string]string `protobuf:"bytes,2,rep,name=attrs,proto3" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields - - Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` - Attrs map[string]string `protobuf:"bytes,2,rep,name=attrs,proto3" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + sizeCache protoimpl.SizeCache } func (x *Update) Reset() { @@ -294,14 +293,13 @@ func (x *Update) GetAttrs() map[string]string { // Selector identifies a source to match a policy to type Selector struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` // MatchType is the type of match to perform on the source identifier - MatchType MatchType `protobuf:"varint,2,opt,name=match_type,json=matchType,proto3,enum=moby.buildkit.v1.sourcepolicy.MatchType" json:"match_type,omitempty"` - Constraints []*AttrConstraint `protobuf:"bytes,3,rep,name=constraints,proto3" json:"constraints,omitempty"` + MatchType MatchType `protobuf:"varint,2,opt,name=match_type,json=matchType,proto3,enum=moby.buildkit.v1.sourcepolicy.MatchType" json:"match_type,omitempty"` + Constraints []*AttrConstraint `protobuf:"bytes,3,rep,name=constraints,proto3" json:"constraints,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Selector) Reset() { @@ -357,13 +355,12 @@ func (x *Selector) GetConstraints() []*AttrConstraint { // AttrConstraint defines a constraint on a source attribute 
type AttrConstraint struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Condition AttrMatch `protobuf:"varint,3,opt,name=condition,proto3,enum=moby.buildkit.v1.sourcepolicy.AttrMatch" json:"condition,omitempty"` unknownFields protoimpl.UnknownFields - - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - Condition AttrMatch `protobuf:"varint,3,opt,name=condition,proto3,enum=moby.buildkit.v1.sourcepolicy.AttrMatch" json:"condition,omitempty"` + sizeCache protoimpl.SizeCache } func (x *AttrConstraint) Reset() { @@ -419,12 +416,11 @@ func (x *AttrConstraint) GetCondition() AttrMatch { // Policy is the list of rules the policy engine will perform type Policy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // Currently 1 + Rules []*Rule `protobuf:"bytes,2,rep,name=rules,proto3" json:"rules,omitempty"` unknownFields protoimpl.UnknownFields - - Version int64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // Currently 1 - Rules []*Rule `protobuf:"bytes,2,rep,name=rules,proto3" json:"rules,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Policy) Reset() { @@ -473,88 +469,57 @@ func (x *Policy) GetRules() []*Rule { var File_github_com_moby_buildkit_sourcepolicy_pb_policy_proto protoreflect.FileDescriptor -var file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_rawDesc = []byte{ - 0x0a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, - 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2f, 0x70, 0x62, 0x2f, 0x70, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xd1, 0x01, 0x0a, 0x04, 0x52, 0x75, 0x6c, 0x65, 0x12, - 0x43, 0x0a, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x2b, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, - 0x76, 0x31, 0x2e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, - 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x43, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, - 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x3f, 0x0a, 0x07, 0x75, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x6f, 0x62, - 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 
0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x52, 0x07, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x22, 0xaa, 0x01, 0x0a, 0x06, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, - 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x46, 0x0a, 0x05, 0x61, 0x74, 0x74, 0x72, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x41, 0x74, 0x74, 0x72, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x61, 0x74, 0x74, 0x72, 0x73, 0x1a, 0x38, 0x0a, - 0x0a, 0x41, 0x74, 0x74, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc4, 0x01, 0x0a, 0x08, 0x53, 0x65, 0x6c, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1e, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x66, 0x69, 0x65, 0x72, 0x12, 0x47, 0x0a, 0x0a, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x5f, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x09, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x4f, 0x0a, - 0x0b, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, - 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x70, 0x6f, 0x6c, 0x69, - 0x63, 0x79, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, - 0x74, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x74, 0x73, 0x22, 0x80, - 0x01, 0x0a, 0x0e, 0x41, 0x74, 0x74, 0x72, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x72, 0x61, 0x69, 0x6e, - 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x46, 0x0a, 0x09, 0x63, 0x6f, 0x6e, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x6d, - 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x41, 0x74, 0x74, - 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x22, 0x5d, 0x0a, 0x06, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 
0x69, 0x6c, - 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x2e, 0x52, 0x75, 0x6c, 0x65, 0x52, 0x05, 0x72, 0x75, 0x6c, 0x65, 0x73, - 0x2a, 0x30, 0x0a, 0x0c, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, - 0x45, 0x4e, 0x59, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x4e, 0x56, 0x45, 0x52, 0x54, - 0x10, 0x02, 0x2a, 0x31, 0x0a, 0x09, 0x41, 0x74, 0x74, 0x72, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x12, - 0x09, 0x0a, 0x05, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4e, 0x4f, - 0x54, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x4d, 0x41, 0x54, 0x43, - 0x48, 0x45, 0x53, 0x10, 0x02, 0x2a, 0x2f, 0x0a, 0x09, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x57, 0x49, 0x4c, 0x44, 0x43, 0x41, 0x52, 0x44, 0x10, 0x00, - 0x12, 0x09, 0x0a, 0x05, 0x45, 0x58, 0x41, 0x43, 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x52, - 0x45, 0x47, 0x45, 0x58, 0x10, 0x02, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, - 0x69, 0x74, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2f, - 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x62, 0x79, 0x5f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, - 0x5f, 0x76, 0x31, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_rawDesc = "" + + "\n" + + "5github.com/moby/buildkit/sourcepolicy/pb/policy.proto\x12\x1dmoby.buildkit.v1.sourcepolicy\"\xd1\x01\n" + + "\x04Rule\x12C\n" + + "\x06action\x18\x01 \x01(\x0e2+.moby.buildkit.v1.sourcepolicy.PolicyActionR\x06action\x12C\n" + + "\bselector\x18\x02 \x01(\v2'.moby.buildkit.v1.sourcepolicy.SelectorR\bselector\x12?\n" + + "\aupdates\x18\x03 \x01(\v2%.moby.buildkit.v1.sourcepolicy.UpdateR\aupdates\"\xaa\x01\n" + + "\x06Update\x12\x1e\n" + + "\n" + + "identifier\x18\x01 \x01(\tR\n" + + "identifier\x12F\n" + + "\x05attrs\x18\x02 \x03(\v20.moby.buildkit.v1.sourcepolicy.Update.AttrsEntryR\x05attrs\x1a8\n" + + "\n" + + "AttrsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xc4\x01\n" + + "\bSelector\x12\x1e\n" + + "\n" + + "identifier\x18\x01 \x01(\tR\n" + + "identifier\x12G\n" + + "\n" + + "match_type\x18\x02 \x01(\x0e2(.moby.buildkit.v1.sourcepolicy.MatchTypeR\tmatchType\x12O\n" + + "\vconstraints\x18\x03 \x03(\v2-.moby.buildkit.v1.sourcepolicy.AttrConstraintR\vconstraints\"\x80\x01\n" + + "\x0eAttrConstraint\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value\x12F\n" + + "\tcondition\x18\x03 \x01(\x0e2(.moby.buildkit.v1.sourcepolicy.AttrMatchR\tcondition\"]\n" + + "\x06Policy\x12\x18\n" + + "\aversion\x18\x01 \x01(\x03R\aversion\x129\n" + + "\x05rules\x18\x02 \x03(\v2#.moby.buildkit.v1.sourcepolicy.RuleR\x05rules*0\n" + + "\fPolicyAction\x12\t\n" + + "\x05ALLOW\x10\x00\x12\b\n" + + "\x04DENY\x10\x01\x12\v\n" + + "\aCONVERT\x10\x02*1\n" + + "\tAttrMatch\x12\t\n" + + "\x05EQUAL\x10\x00\x12\f\n" + + "\bNOTEQUAL\x10\x01\x12\v\n" + + "\aMATCHES\x10\x02*/\n" + + "\tMatchType\x12\f\n" + + "\bWILDCARD\x10\x00\x12\t\n" + + "\x05EXACT\x10\x01\x12\t\n" + + 
"\x05REGEX\x10\x02BHZFgithub.com/moby/buildkit/sourcepolicy/pb;moby_buildkit_v1_sourcepolicyb\x06proto3" var ( file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_rawDescOnce sync.Once - file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_rawDescData = file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_rawDesc + file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_rawDescData []byte ) func file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_rawDescGZIP() []byte { file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_rawDescOnce.Do(func() { - file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_rawDescData) + file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_rawDesc), len(file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_rawDesc))) }) return file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_rawDescData } @@ -597,7 +562,7 @@ func file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_rawDesc), len(file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_rawDesc)), NumEnums: 3, NumMessages: 6, NumExtensions: 0, @@ -609,7 +574,6 @@ func file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_init() { MessageInfos: file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_msgTypes, }.Build() File_github_com_moby_buildkit_sourcepolicy_pb_policy_proto = out.File - file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_rawDesc = nil file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_goTypes = nil file_github_com_moby_buildkit_sourcepolicy_pb_policy_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/apicaps/caps.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/apicaps/caps.go index f43a7545a..a49ed79bb 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/apicaps/caps.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/apicaps/caps.go @@ -1,8 +1,9 @@ package apicaps import ( + "cmp" "fmt" - "sort" + "slices" "strings" pb "github.com/moby/buildkit/util/apicaps/pb" @@ -76,8 +77,8 @@ func (l *CapList) All() []*pb.APICap { DisabledAlternative: c.DisabledAlternative, }) } - sort.Slice(out, func(i, j int) bool { - return out[i].ID < out[j].ID + slices.SortFunc(out, func(a, b *pb.APICap) int { + return cmp.Compare(a.ID, b.ID) }) return out } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go index 0d2fb580d..a68d49367 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.11.4 // source: github.com/moby/buildkit/util/apicaps/pb/caps.proto @@ -11,6 +11,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -22,16 +23,15 @@ const ( // APICap defines a capability supported by the service type APICap struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Enabled bool `protobuf:"varint,2,opt,name=Enabled,proto3" json:"Enabled,omitempty"` - Deprecated bool `protobuf:"varint,3,opt,name=Deprecated,proto3" json:"Deprecated,omitempty"` // Unused. May be used for warnings in the future - DisabledReason string `protobuf:"bytes,4,opt,name=DisabledReason,proto3" json:"DisabledReason,omitempty"` // Reason key for detection code - DisabledReasonMsg string `protobuf:"bytes,5,opt,name=DisabledReasonMsg,proto3" json:"DisabledReasonMsg,omitempty"` // Message to the user - DisabledAlternative string `protobuf:"bytes,6,opt,name=DisabledAlternative,proto3" json:"DisabledAlternative,omitempty"` // Identifier that updated client could catch. + state protoimpl.MessageState `protogen:"open.v1"` + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Enabled bool `protobuf:"varint,2,opt,name=Enabled,proto3" json:"Enabled,omitempty"` + Deprecated bool `protobuf:"varint,3,opt,name=Deprecated,proto3" json:"Deprecated,omitempty"` // Unused. May be used for warnings in the future + DisabledReason string `protobuf:"bytes,4,opt,name=DisabledReason,proto3" json:"DisabledReason,omitempty"` // Reason key for detection code + DisabledReasonMsg string `protobuf:"bytes,5,opt,name=DisabledReasonMsg,proto3" json:"DisabledReasonMsg,omitempty"` // Message to the user + DisabledAlternative string `protobuf:"bytes,6,opt,name=DisabledAlternative,proto3" json:"DisabledAlternative,omitempty"` // Identifier that updated client could catch. 
+ unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *APICap) Reset() { @@ -108,41 +108,27 @@ func (x *APICap) GetDisabledAlternative() string { var File_github_com_moby_buildkit_util_apicaps_pb_caps_proto protoreflect.FileDescriptor -var file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_rawDesc = []byte{ - 0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, - 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x75, 0x74, 0x69, 0x6c, 0x2f, - 0x61, 0x70, 0x69, 0x63, 0x61, 0x70, 0x73, 0x2f, 0x70, 0x62, 0x2f, 0x63, 0x61, 0x70, 0x73, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x6d, 0x6f, 0x62, 0x79, 0x2e, 0x62, 0x75, 0x69, 0x6c, - 0x64, 0x6b, 0x69, 0x74, 0x2e, 0x76, 0x31, 0x2e, 0x61, 0x70, 0x69, 0x63, 0x61, 0x70, 0x73, 0x22, - 0xda, 0x01, 0x0a, 0x06, 0x41, 0x50, 0x49, 0x43, 0x61, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x49, 0x44, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x45, 0x6e, - 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x45, 0x6e, 0x61, - 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, - 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x44, 0x69, - 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x11, - 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x4d, 0x73, - 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x4d, 0x73, 0x67, 0x12, 0x30, 0x0a, 0x13, 0x44, 0x69, - 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, - 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, - 0x64, 0x41, 0x6c, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x42, 0x43, 0x5a, 0x41, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, 0x79, 0x2f, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x75, 0x74, 0x69, 0x6c, 0x2f, 0x61, 0x70, - 0x69, 0x63, 0x61, 0x70, 0x73, 0x2f, 0x70, 0x62, 0x3b, 0x6d, 0x6f, 0x62, 0x79, 0x5f, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x5f, 0x76, 0x31, 0x5f, 0x61, 0x70, 0x69, 0x63, 0x61, 0x70, - 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_rawDesc = "" + + "\n" + + "3github.com/moby/buildkit/util/apicaps/pb/caps.proto\x12\x18moby.buildkit.v1.apicaps\"\xda\x01\n" + + "\x06APICap\x12\x0e\n" + + "\x02ID\x18\x01 \x01(\tR\x02ID\x12\x18\n" + + "\aEnabled\x18\x02 \x01(\bR\aEnabled\x12\x1e\n" + + "\n" + + "Deprecated\x18\x03 \x01(\bR\n" + + "Deprecated\x12&\n" + + "\x0eDisabledReason\x18\x04 \x01(\tR\x0eDisabledReason\x12,\n" + + "\x11DisabledReasonMsg\x18\x05 \x01(\tR\x11DisabledReasonMsg\x120\n" + + "\x13DisabledAlternative\x18\x06 \x01(\tR\x13DisabledAlternativeBCZAgithub.com/moby/buildkit/util/apicaps/pb;moby_buildkit_v1_apicapsb\x06proto3" var ( file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_rawDescOnce sync.Once - file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_rawDescData = file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_rawDesc + 
file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_rawDescData []byte ) func file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_rawDescGZIP() []byte { file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_rawDescOnce.Do(func() { - file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_rawDescData) + file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_rawDesc), len(file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_rawDesc))) }) return file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_rawDescData } @@ -168,7 +154,7 @@ func file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_rawDesc), len(file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -179,7 +165,6 @@ func file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_init() { MessageInfos: file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_msgTypes, }.Build() File_github_com_moby_buildkit_util_apicaps_pb_caps_proto = out.File - file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_rawDesc = nil file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_goTypes = nil file_github_com_moby_buildkit_util_apicaps_pb_caps_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go index 6917469fe..3fb69f26d 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go @@ -12,8 +12,9 @@ const ( var ( Root = filepath.Join(os.Getenv("ProgramData"), "buildkitd", ".buildstate") ConfigDir = filepath.Join(os.Getenv("ProgramData"), "buildkitd") - DefaultCNIBinDir = filepath.Join(ConfigDir, "bin") - DefaultCNIConfigPath = filepath.Join(ConfigDir, "cni.json") + defaultContainerdDir = filepath.Join(os.Getenv("ProgramFiles"), "containerd") + DefaultCNIBinDir = filepath.Join(defaultContainerdDir, "cni", "bin") + DefaultCNIConfigPath = filepath.Join(defaultContainerdDir, "cni", "conf", "0-containerd-nat.conf") ) var ( diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/contentutil/copy.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/contentutil/copy.go index 505a08187..c1c10b5af 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/contentutil/copy.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/contentutil/copy.go @@ -27,7 +27,7 @@ type localFetcher struct { } func (f *localFetcher) Fetch(ctx context.Context, desc ocispecs.Descriptor) (io.ReadCloser, error) { - r, err := f.Provider.ReaderAt(ctx, desc) + r, err := f.ReaderAt(ctx, desc) if err != nil { return nil, err } @@ -42,7 +42,7 @@ type rc struct { func (r *rc) Read(b []byte) (int, error) { n, err := r.ReadAt(b, r.offset) r.offset += int64(n) - if n > 0 && err == io.EOF { + if n > 0 && errors.Is(err, io.EOF) { err = nil } return n, err diff 
--git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go index f7105fd9e..560b9fcfc 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go @@ -54,8 +54,8 @@ func (r *readerAt) ReadAt(b []byte, off int64) (int, error) { var totalN int for len(b) > 0 { - n, err := r.Reader.Read(b) - if err == io.EOF && n == len(b) { + n, err := r.Read(b) + if errors.Is(err, io.EOF) && n == len(b) { err = nil } r.offset += int64(n) diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/contentutil/source.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/contentutil/source.go index a12d60d1a..4ee563b1e 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/contentutil/source.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/contentutil/source.go @@ -2,6 +2,7 @@ package contentutil import ( "net/url" + "slices" "strings" "github.com/containerd/containerd/v2/core/content" @@ -24,11 +25,8 @@ func HasSource(info content.Info, refspec reference.Spec) (bool, error) { return false, nil } - for _, repo := range strings.Split(repoLabel, ",") { - // the target repo is not a candidate - if repo == target { - return true, nil - } + if slices.Contains(strings.Split(repoLabel, ","), target) { + return true, nil } return false, nil } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go index faf8411b2..e986d6764 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go @@ -4,7 +4,7 @@ import ( "context" "io" "math/rand" - "sort" + "slices" "sync" "time" @@ -211,7 +211,7 @@ func (c *call[T]) Err() error { } } -func (c *call[T]) Value(key interface{}) interface{} { +func (c *call[T]) Value(key any) any { if key == contextKey { return c.progressState } @@ -299,7 +299,7 @@ func (ps *progressState) run(pr progress.Reader) { for { p, err := pr.Read(context.TODO()) if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { ps.mu.Lock() ps.done = true ps.mu.Unlock() @@ -330,8 +330,8 @@ func (ps *progressState) add(pw progress.Writer) { for _, p := range ps.items { plist = append(plist, p) } - sort.Slice(plist, func(i, j int) bool { - return plist[i].Timestamp.Before(plist[j].Timestamp) + slices.SortFunc(plist, func(a, b *progress.Progress) int { + return a.Timestamp.Compare(b.Timestamp) }) for _, p := range plist { rw.WriteRawProgress(p) @@ -353,7 +353,7 @@ func (ps *progressState) close(pw progress.Writer) { for i, w := range ps.writers { if w == rw { w.Close() - ps.writers = append(ps.writers[:i], ps.writers[i+1:]...) 
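(Illustrative aside, not part of the diff.) Several hunks above replace sort.Slice and hand-rolled membership loops with the Go 1.21 slices and cmp packages. A small self-contained sketch of the pattern, using made-up data:

package main

import (
	"cmp"
	"fmt"
	"slices"
)

type entry struct {
	ID string
	TS int64
}

func main() {
	xs := []entry{{"b", 2}, {"a", 1}}

	// Before: sort.Slice(xs, func(i, j int) bool { return xs[i].ID < xs[j].ID })
	// After: a three-way comparison function.
	slices.SortFunc(xs, func(a, b entry) int { return cmp.Compare(a.ID, b.ID) })

	// Before: a for loop returning true on the first match.
	// After: slices.Contains.
	repos := []string{"docker.io/library/alpine", "ghcr.io/example/app"}
	fmt.Println(xs[0].ID, slices.Contains(repos, "ghcr.io/example/app")) // a true
}

Like sort.Slice, slices.SortFunc is not guaranteed stable, so the swap does not change ordering guarantees; the progress code compares timestamps with time.Time.Compare for the same reason.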
+ ps.writers = slices.Delete(ps.writers, i, i+1) break } } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/gitutil/git_cli.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/gitutil/git_cli.go index 23ff8aa4f..39d439794 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/gitutil/git_cli.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/gitutil/git_cli.go @@ -6,6 +6,7 @@ import ( "io" "os" "os/exec" + "slices" "strings" "github.com/pkg/errors" @@ -120,7 +121,7 @@ func NewGitCLI(opts ...Option) *GitCLI { // with the given options applied on top. func (cli *GitCLI) New(opts ...Option) *GitCLI { clone := *cli - clone.args = append([]string{}, cli.args...) + clone.args = slices.Clone(cli.args) for _, opt := range opts { opt(&clone) diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/gitutil/git_ref.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/gitutil/git_ref.go index 59d658264..cb562acf5 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/gitutil/git_ref.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/gitutil/git_ref.go @@ -93,8 +93,8 @@ func ParseGitRef(ref string) (*GitRef, error) { if res.IndistinguishableFromLocal { _, res.Remote, _ = strings.Cut(res.Remote, "://") } - if remote.Fragment != nil { - res.Commit, res.SubDir = remote.Fragment.Ref, remote.Fragment.Subdir + if remote.Opts != nil { + res.Commit, res.SubDir = remote.Opts.Ref, remote.Opts.Subdir } repoSplitBySlash := strings.Split(res.Remote, "/") diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/gitutil/git_url.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/gitutil/git_url.go index 0f1ff505c..338893b27 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/gitutil/git_url.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/gitutil/git_url.go @@ -47,31 +47,31 @@ type GitURL struct { Path string // User is the username/password to access the host User *url.Userinfo - // Fragment can contain additional metadata - Fragment *GitURLFragment + // Opts can contain additional metadata + Opts *GitURLOpts // Remote is a valid URL remote to pass into the Git CLI tooling (i.e. // without the fragment metadata) Remote string } -// GitURLFragment is the buildkit-specific metadata extracted from the fragment +// GitURLOpts is the buildkit-specific metadata extracted from the fragment // of a remote URL. -type GitURLFragment struct { +type GitURLOpts struct { // Ref is the git reference Ref string // Subdir is the sub-directory inside the git repository to use Subdir string } -// splitGitFragment splits a git URL fragment into its respective git +// parseOpts splits a git URL fragment into its respective git // reference and subdirectory components. 
-func splitGitFragment(fragment string) *GitURLFragment { +func parseOpts(fragment string) *GitURLOpts { if fragment == "" { return nil } ref, subdir, _ := strings.Cut(fragment, ":") - return &GitURLFragment{Ref: ref, Subdir: subdir} + return &GitURLOpts{Ref: ref, Subdir: subdir} } // ParseURL parses a BuildKit-style Git URL (that may contain additional @@ -106,27 +106,27 @@ func IsGitTransport(remote string) bool { } func fromURL(url *url.URL) *GitURL { - withoutFragment := *url - withoutFragment.Fragment = "" + withoutOpts := *url + withoutOpts.Fragment = "" return &GitURL{ - Scheme: url.Scheme, - User: url.User, - Host: url.Host, - Path: url.Path, - Fragment: splitGitFragment(url.Fragment), - Remote: withoutFragment.String(), + Scheme: url.Scheme, + User: url.User, + Host: url.Host, + Path: url.Path, + Opts: parseOpts(url.Fragment), + Remote: withoutOpts.String(), } } func fromSCPStyleURL(url *sshutil.SCPStyleURL) *GitURL { - withoutFragment := *url - withoutFragment.Fragment = "" + withoutOpts := *url + withoutOpts.Fragment = "" return &GitURL{ - Scheme: SSHProtocol, - User: url.User, - Host: url.Host, - Path: url.Path, - Fragment: splitGitFragment(url.Fragment), - Remote: withoutFragment.String(), + Scheme: SSHProtocol, + User: url.User, + Host: url.Host, + Path: url.Path, + Opts: parseOpts(url.Fragment), + Remote: withoutOpts.String(), } } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go index ecb0dbe79..8689dfff0 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go @@ -45,7 +45,7 @@ func ToGRPC(ctx context.Context, err error) error { // If the original error was wrapped with more context than the GRPCStatus error, // copy the original message to the GRPCStatus error - if err.Error() != st.Message() { + if errorHasMoreContext(err, st) { pb := st.Proto() pb.Message = err.Error() st = status.FromProto(pb) @@ -72,6 +72,21 @@ func ToGRPC(ctx context.Context, err error) error { return st.Err() } +// errorHasMoreContext checks if the original error provides more context by having +// a different message or additional details than the Status. 
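(Illustrative aside, not part of the diff.) The gitutil hunks above rename the URL-fragment metadata from Fragment/GitURLFragment to Opts/GitURLOpts; the fragment itself still encodes an optional ref and subdirectory separated by a colon. A minimal sketch of that split, with hypothetical names:

package main

import (
	"fmt"
	"strings"
)

// splitFragment mirrors the ref:subdir convention used in BuildKit-style
// Git URLs, e.g. "https://example.com/repo.git#v1.2.3:docs".
func splitFragment(fragment string) (ref, subdir string) {
	ref, subdir, _ = strings.Cut(fragment, ":")
	return ref, subdir
}

func main() {
	ref, subdir := splitFragment("v1.2.3:docs")
	fmt.Printf("ref=%q subdir=%q\n", ref, subdir) // ref="v1.2.3" subdir="docs"
}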
+func errorHasMoreContext(err error, st *status.Status) bool { + if errMessage := err.Error(); len(errMessage) > len(st.Message()) { + // check if the longer message in errMessage is only due to + // prepending with the status code + var grpcStatusError *grpcStatusError + if errors.As(err, &grpcStatusError) { + return st.Code() != grpcStatusError.st.Code() || st.Message() != grpcStatusError.st.Message() + } + return true + } + return false +} + func withDetails(ctx context.Context, s *status.Status, details ...proto.Message) (*status.Status, error) { if s.Code() == codes.OK { return nil, errors.New("no error details for status with code OK") @@ -124,7 +139,7 @@ func Code(err error) codes.Code { } func WrapCode(err error, code codes.Code) error { - return &withCode{error: err, code: code} + return &withCodeError{error: err, code: code} } func AsGRPCStatus(err error) (*status.Status, bool) { @@ -172,6 +187,8 @@ func FromGRPC(err error) error { for _, d := range pb.Details { m, err := typeurl.UnmarshalAny(d) if err != nil { + bklog.L.Debugf("failed to unmarshal error detail with type %q: %v", d.GetTypeUrl(), err) + n.Details = append(n.Details, d) continue } @@ -181,6 +198,7 @@ func FromGRPC(err error) error { case TypedErrorProto: details = append(details, v) default: + bklog.L.Debugf("unknown detail with type %T", v) n.Details = append(n.Details, d) } } @@ -219,16 +237,16 @@ func (e *grpcStatusError) GRPCStatus() *status.Status { return e.st } -type withCode struct { +type withCodeError struct { code codes.Code error } -func (e *withCode) Code() codes.Code { +func (e *withCodeError) Code() codes.Code { return e.code } -func (e *withCode) Unwrap() error { +func (e *withCodeError) Unwrap() error { return e.error } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go index a59207891..a4c774953 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go @@ -10,7 +10,7 @@ import ( "google.golang.org/grpc" ) -func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { +func UnaryServerInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { resp, err = handler(ctx, req) oldErr := err if err != nil { @@ -29,7 +29,7 @@ func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.Una return resp, err } -func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { +func StreamServerInterceptor(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { err := ToGRPC(ss.Context(), handler(srv, ss)) if err != nil { stack.Helper() @@ -37,7 +37,7 @@ func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.S return err } -func UnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { +func UnaryClientInterceptor(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { err := FromGRPC(invoker(ctx, method, req, reply, cc, opts...)) if err != nil { stack.Helper() diff --git 
a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/imageutil/config.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/imageutil/config.go index ba1c6852e..db543eafc 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/imageutil/config.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/imageutil/config.go @@ -13,6 +13,7 @@ import ( "github.com/containerd/containerd/v2/core/remotes" "github.com/containerd/containerd/v2/core/remotes/docker" "github.com/containerd/containerd/v2/pkg/reference" + cerrdefs "github.com/containerd/errdefs" "github.com/containerd/platforms" intoto "github.com/in-toto/in-toto-golang/in_toto" srctypes "github.com/moby/buildkit/source/types" @@ -115,8 +116,9 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co } if desc.MediaType == images.MediaTypeDockerSchema1Manifest { - dgst, dt, err := readSchema1Config(ctx, desc, fetcher) - return dgst, dt, err + errMsg := "support Docker Image manifest version 2, schema 1 has been removed. " + + "More information at https://docs.docker.com/go/deprecated-image-specs/" + return "", nil, errors.WithStack(cerrdefs.ErrConflict.WithMessage(errMsg)) } children := childrenConfigHandler(cache, platform) @@ -198,10 +200,7 @@ func childrenConfigHandler(provider content.Provider, platform platforms.MatchCo } } -// specs.MediaTypeImageManifest, // TODO: detect schema1/manifest-list func DetectManifestMediaType(ra content.ReaderAt) (string, error) { - // TODO: schema1 - dt := make([]byte, ra.Size()) if _, err := ra.ReadAt(dt, 0); err != nil { return "", err diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/imageutil/schema1.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/imageutil/schema1.go deleted file mode 100644 index 72bee0c8f..000000000 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/imageutil/schema1.go +++ /dev/null @@ -1,88 +0,0 @@ -package imageutil - -import ( - "context" - "encoding/json" - "io" - "strings" - "time" - - "github.com/containerd/containerd/v2/core/remotes" - dockerspec "github.com/moby/docker-image-spec/specs-go/v1" - digest "github.com/opencontainers/go-digest" - ocispecs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -func readSchema1Config(ctx context.Context, desc ocispecs.Descriptor, fetcher remotes.Fetcher) (digest.Digest, []byte, error) { - rc, err := fetcher.Fetch(ctx, desc) - if err != nil { - return "", nil, err - } - defer rc.Close() - dt, err := io.ReadAll(rc) - if err != nil { - return "", nil, errors.Wrap(err, "failed to fetch schema1 manifest") - } - dt, err = convertSchema1ConfigMeta(dt) - if err != nil { - return "", nil, err - } - return desc.Digest, dt, nil -} - -func convertSchema1ConfigMeta(in []byte) ([]byte, error) { - type history struct { - V1Compatibility string `json:"v1Compatibility"` - } - var m struct { - History []history `json:"history"` - } - if err := json.Unmarshal(in, &m); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal schema1 manifest") - } - if len(m.History) == 0 { - return nil, errors.Errorf("invalid schema1 manifest") - } - - var img dockerspec.DockerOCIImage - if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), &img); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal image from schema 1 history") - } - - img.RootFS = ocispecs.RootFS{ - Type: "layers", // filled in by exporter - } - img.History = make([]ocispecs.History, len(m.History)) - - for i := range m.History { - var h v1History - if 
err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil { - return nil, errors.Wrap(err, "failed to unmarshal history") - } - img.History[len(m.History)-i-1] = ocispecs.History{ - Author: h.Author, - Comment: h.Comment, - Created: &h.Created, - CreatedBy: strings.Join(h.ContainerConfig.Cmd, " "), - EmptyLayer: (h.ThrowAway != nil && *h.ThrowAway) || (h.Size != nil && *h.Size == 0), - } - } - - dt, err := json.MarshalIndent(img, "", " ") - if err != nil { - return nil, errors.Wrap(err, "failed to marshal schema1 config") - } - return dt, nil -} - -type v1History struct { - Author string `json:"author,omitempty"` - Created time.Time `json:"created"` - Comment string `json:"comment,omitempty"` - ThrowAway *bool `json:"throwaway,omitempty"` - Size *int `json:"Size,omitempty"` // used before ThrowAway field - ContainerConfig struct { - Cmd []string `json:"Cmd,omitempty"` - } `json:"container_config,omitempty"` -} diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/multireader.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/multireader.go index 6afcca9fe..bcc9eb897 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/multireader.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/multireader.go @@ -2,6 +2,7 @@ package progress import ( "context" + "errors" "io" "sync" ) @@ -110,7 +111,7 @@ func (mr *MultiReader) handle() error { for { p, err := mr.main.Read(context.TODO()) if err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { mr.mu.Lock() cancelErr := context.Canceled for w, c := range mr.writers { diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/multiwriter.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/multiwriter.go index 673f9ab57..acebbc175 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/multiwriter.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/multiwriter.go @@ -2,7 +2,7 @@ package progress import ( "maps" - "sort" + "slices" "sync" "time" ) @@ -16,7 +16,7 @@ type MultiWriter struct { mu sync.Mutex items []*Progress writers map[rawProgressWriter]struct{} - meta map[string]interface{} + meta map[string]any } var _ rawProgressWriter = &MultiWriter{} @@ -24,7 +24,7 @@ var _ rawProgressWriter = &MultiWriter{} func NewMultiWriter(opts ...WriterOption) *MultiWriter { mw := &MultiWriter{ writers: map[rawProgressWriter]struct{}{}, - meta: map[string]interface{}{}, + meta: map[string]any{}, } for _, o := range opts { o(mw) @@ -49,8 +49,8 @@ func (ps *MultiWriter) Add(pw Writer) { ps.mu.Lock() plist := make([]*Progress, 0, len(ps.items)) plist = append(plist, ps.items...) 
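(Illustrative aside, not part of the diff.) Another recurring change above is err == io.EOF becoming errors.Is(err, io.EOF), which also matches EOF values that were wrapped with %w somewhere along the way. A quick sketch of the difference:

package main

import (
	"errors"
	"fmt"
	"io"
)

func main() {
	wrapped := fmt.Errorf("reading progress stream: %w", io.EOF)

	fmt.Println(wrapped == io.EOF)          // false: the sentinel is hidden by the wrapper
	fmt.Println(errors.Is(wrapped, io.EOF)) // true: Is unwraps the chain
}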
- sort.Slice(plist, func(i, j int) bool { - return plist[i].Timestamp.Before(plist[j].Timestamp) + slices.SortFunc(plist, func(a, b *Progress) int { + return a.Timestamp.Compare(b.Timestamp) }) for _, p := range plist { rw.WriteRawProgress(p) @@ -70,7 +70,7 @@ func (ps *MultiWriter) Delete(pw Writer) { ps.mu.Unlock() } -func (ps *MultiWriter) Write(id string, v interface{}) error { +func (ps *MultiWriter) Write(id string, v any) error { p := &Progress{ ID: id, Timestamp: time.Now(), @@ -83,7 +83,7 @@ func (ps *MultiWriter) Write(id string, v interface{}) error { func (ps *MultiWriter) WriteRawProgress(p *Progress) error { meta := p.meta if len(ps.meta) > 0 { - meta = map[string]interface{}{} + meta = map[string]any{} maps.Copy(meta, p.meta) for k, v := range ps.meta { if _, ok := meta[k]; !ok { diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/progress.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/progress.go index 89b8a7dae..e735c62c6 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/progress.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/progress.go @@ -4,7 +4,7 @@ import ( "context" "io" "maps" - "sort" + "slices" "sync" "time" @@ -67,7 +67,7 @@ func WithProgress(ctx context.Context, pw Writer) context.Context { return context.WithValue(ctx, contextKey, pw) } -func WithMetadata(key string, val interface{}) WriterOption { +func WithMetadata(key string, val any) WriterOption { return func(w Writer) { if pw, ok := w.(*progressWriter); ok { pw.meta[key] = val @@ -84,7 +84,7 @@ type Controller interface { } type Writer interface { - Write(id string, value interface{}) error + Write(id string, value any) error Close() error } @@ -95,8 +95,8 @@ type Reader interface { type Progress struct { ID string Timestamp time.Time - Sys interface{} - meta map[string]interface{} + Sys any + meta map[string]any } type Status struct { @@ -165,9 +165,8 @@ func (pr *progressReader) Read(ctx context.Context) ([]*Progress, error) { for _, p := range dmap { out = append(out, p) } - - sort.Slice(out, func(i, j int) bool { - return out[i].Timestamp.Before(out[j].Timestamp) + slices.SortFunc(out, func(a, b *Progress) int { + return a.Timestamp.Compare(b.Timestamp) }) return out, nil @@ -207,7 +206,7 @@ func pipe() (*progressReader, *progressWriter, func(error)) { } func newWriter(pw *progressWriter) *progressWriter { - meta := make(map[string]interface{}) + meta := make(map[string]any) maps.Copy(meta, pw.meta) pw = &progressWriter{ reader: pw.reader, @@ -220,10 +219,10 @@ func newWriter(pw *progressWriter) *progressWriter { type progressWriter struct { done bool reader *progressReader - meta map[string]interface{} + meta map[string]any } -func (pw *progressWriter) Write(id string, v interface{}) error { +func (pw *progressWriter) Write(id string, v any) error { if pw.done { return errors.Errorf("writing %s to closed progress writer", id) } @@ -238,7 +237,7 @@ func (pw *progressWriter) Write(id string, v interface{}) error { func (pw *progressWriter) WriteRawProgress(p *Progress) error { meta := p.meta if len(pw.meta) > 0 { - meta = map[string]interface{}{} + meta = map[string]any{} maps.Copy(meta, p.meta) for k, v := range pw.meta { if _, ok := meta[k]; !ok { @@ -267,14 +266,14 @@ func (pw *progressWriter) Close() error { return nil } -func (p *Progress) Meta(key string) (interface{}, bool) { +func (p *Progress) Meta(key string) (any, bool) { v, ok := p.meta[key] return v, ok } type noOpWriter struct{} -func (pw 
*noOpWriter) Write(_ string, _ interface{}) error { +func (pw *noOpWriter) Write(_ string, _ any) error { return nil } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/progressui/colors.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/progressui/colors.go index e48535960..9dd7b3247 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/progressui/colors.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/progressui/colors.go @@ -40,7 +40,7 @@ func setUserDefinedTermColors(colorsEnv string) { for _, field := range fields { k, v, ok := strings.Cut(field, "=") if !ok || strings.Contains(v, "=") { - err := errors.New("A valid entry must have exactly two fields") + err := errors.New("valid entry must have exactly two fields") bklog.L.WithError(err).Warnf("Could not parse BUILDKIT_COLORS component: %s", field) continue } @@ -52,7 +52,7 @@ func setUserDefinedTermColors(colorsEnv string) { parseKeys(k, c) } } else { - err := errors.New("Colors must be a name from the pre-defined list or a valid 3-part RGB value") + err := errors.New("colors must be a name from the pre-defined list or a valid 3-part RGB value") bklog.L.WithError(err).Warnf("Unknown color value found in BUILDKIT_COLORS: %s=%s", k, v) } } @@ -63,7 +63,7 @@ func readBuildkitColorsEnv(colorsEnv string) []string { csvReader.Comma = ':' fields, err := csvReader.Fields(colorsEnv, nil) if err != nil { - bklog.L.WithError(err).Warnf("Could not parse BUILDKIT_COLORS. Falling back to defaults.") + bklog.L.WithError(err).Warnf("could not parse BUILDKIT_COLORS. Falling back to defaults.") return nil } return fields @@ -76,7 +76,7 @@ func readRGB(v string) aec.ANSI { return nil } if len(fields) != 3 { - err = errors.New("A valid RGB color must have three fields") + err = errors.New("valid RGB color must have three fields") bklog.L.WithError(err).Warnf("Could not parse value %s as valid RGB color. 
Ignoring.", v) return nil } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/progressui/display.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/progressui/display.go index c6ec68e6f..41d108586 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/progressui/display.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/progressui/display.go @@ -8,6 +8,7 @@ import ( "fmt" "io" "os" + "slices" "sort" "strconv" "strings" @@ -518,8 +519,8 @@ func mergeIntervals(intervals []interval) []interval { } // sort intervals by start time - sort.Slice(intervals, func(i, j int) bool { - return intervals[i].start.Before(*intervals[j].start) + slices.SortFunc(intervals, func(a, b interval) int { + return a.start.Compare(*b.start) }) var merged []interval @@ -587,10 +588,8 @@ func (t *trace) triggerVertexEvent(v *client.Vertex) { old = *v } - changed := false - if v.Digest != old.Digest { - changed = true - } + changed := v.Digest != old.Digest + if v.Name != old.Name { changed = true } @@ -641,7 +640,11 @@ func (t *trace) update(s *client.SolveStatus, termWidth int) { subVtxs: make(map[digest.Digest]client.Vertex), } if t.modeConsole { - group.term = vt100.NewVT100(termHeight, termWidth-termPad) + w := termWidth - termPad + if w <= 0 { + w = 1 + } + group.term = vt100.NewVT100(termHeight, w) } t.groups[v.ProgressGroup.Id] = group t.byDigest[group.Digest] = group.vertex @@ -662,7 +665,11 @@ func (t *trace) update(s *client.SolveStatus, termWidth int) { intervals: make(map[int64]interval), } if t.modeConsole { - t.byDigest[v.Digest].term = vt100.NewVT100(termHeight, termWidth-termPad) + w := termWidth - termPad + if w <= 0 { + w = 1 + } + t.byDigest[v.Digest].term = vt100.NewVT100(termHeight, w) } } t.triggerVertexEvent(v) @@ -673,7 +680,7 @@ func (t *trace) update(s *client.SolveStatus, termWidth int) { t.vertexes = append(t.vertexes, t.byDigest[v.Digest]) } // allow a duplicate initial vertex that shouldn't reset state - if !(prev != nil && prev.isStarted() && v.Started == nil) { + if prev == nil || !prev.isStarted() || v.Started != nil { t.byDigest[v.Digest].Vertex = v } if v.Started != nil { @@ -765,7 +772,7 @@ func (t *trace) update(s *client.SolveStatus, termWidth int) { } else if sec < 100 { prec = 2 } - v.logs = append(v.logs, []byte(fmt.Sprintf("%s %s", fmt.Sprintf("%.[2]*[1]f", sec, prec), dt))) + v.logs = append(v.logs, fmt.Appendf(nil, "%s %s", fmt.Sprintf("%.[2]*[1]f", sec, prec), dt)) } i++ }) @@ -787,7 +794,7 @@ func (t *trace) printErrorLogs(f io.Writer) { } // printer keeps last logs buffer if v.logsBuffer != nil { - for i := 0; i < v.logsBuffer.Len(); i++ { + for range v.logsBuffer.Len() { if v.logsBuffer.Value != nil { fmt.Fprintln(f, string(v.logsBuffer.Value.([]byte))) } @@ -1000,6 +1007,9 @@ func (disp *ttyDisplay) print(d displayInfo, width, height int, all bool) { } else { out = align(out, "", width) } + if len(out) > width { + out = out[:width] + } fmt.Fprintln(disp.c, out) lineCount := 0 for _, j := range d.jobs { @@ -1071,7 +1081,7 @@ func (disp *ttyDisplay) print(d displayInfo, width, height int, all bool) { } // override previous content if diff := disp.lineCount - lineCount; diff > 0 { - for i := 0; i < diff; i++ { + for range diff { fmt.Fprintln(disp.c, strings.Repeat(" ", width)) } fmt.Fprint(disp.c, aec.EmptyBuilder.Up(uint(diff)).Column(0).ANSI) diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go 
b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go index 338079d47..61443ca33 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go @@ -147,7 +147,7 @@ func (p *textMux) printVtx(t *trace, dgst digest.Digest) { l = l[v.logsOffset:] fmt.Fprintf(p.w, "%s", l) } else { - fmt.Fprintf(p.w, "#%d %s", v.index, []byte(l)) + fmt.Fprintf(p.w, "#%d %s", v.index, l) } if i != len(v.logs)-1 || !v.logsPartial { diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go index 69f39a30a..63b9253ba 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go @@ -33,7 +33,7 @@ func New(f images.HandlerFunc, logger func([]byte)) images.HandlerFunc { } } if logger != nil { - logger([]byte(fmt.Sprintf("error: %v\n", err.Error()))) + logger(fmt.Appendf(nil, "error: %v\n", err.Error())) } } else { return descs, nil @@ -43,7 +43,7 @@ func New(f images.HandlerFunc, logger func([]byte)) images.HandlerFunc { return nil, err } if logger != nil { - logger([]byte(fmt.Sprintf("retrying in %v\n", backoff))) + logger(fmt.Appendf(nil, "retrying in %v\n", backoff)) } time.Sleep(backoff) backoff *= 2 diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/stack/compress.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/stack/compress.go index 6d9a78f34..e09ea1518 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/stack/compress.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/stack/compress.go @@ -25,7 +25,7 @@ loop0: } // full match, potentially skip all if idx == len(st.Frames)-1 { - if st.Pid == prev.Pid && st.Version == prev.Version && slices.Compare(st.Cmdline, st.Cmdline) == 0 { + if st.Pid == prev.Pid && st.Version == prev.Version && slices.Equal(st.Cmdline, prev.Cmdline) { continue loop0 } } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/stack/stack.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/stack/stack.go index 458a1a781..d9bacd410 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/stack/stack.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/stack/stack.go @@ -50,7 +50,7 @@ func Traces(err error) []*Stack { func traces(err error) []*Stack { var st []*Stack - switch e := err.(type) { + switch e := err.(type) { //nolint:errorlint case interface{ Unwrap() error }: st = Traces(e.Unwrap()) case interface{ Unwrap() []error }: @@ -63,7 +63,7 @@ func traces(err error) []*Stack { } } - switch ste := err.(type) { + switch ste := err.(type) { //nolint:errorlint case interface{ StackTrace() errors.StackTrace }: st = append(st, convertStack(ste.StackTrace())) case interface{ StackTrace() *Stack }: @@ -85,7 +85,7 @@ func Enable(err error) error { } func Wrap(err error, s *Stack) error { - return &withStack{stack: s, error: err} + return &withStackError{stack: s, error: err} } func hasLocalStackTrace(err error) bool { @@ -173,15 +173,15 @@ func convertStack(s errors.StackTrace) *Stack { return &out } -type withStack struct { +type withStackError struct { stack *Stack error } -func (e *withStack) Unwrap() error { +func (e *withStackError) Unwrap() error { return e.error } -func (e *withStack) 
StackTrace() *Stack { +func (e *withStackError) StackTrace() *Stack { return e.stack } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/stack/stack.pb.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/stack/stack.pb.go index 452dbd15f..ef36670a8 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/stack/stack.pb.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/stack/stack.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.2 +// protoc-gen-go v1.36.6 // protoc v3.11.4 // source: github.com/moby/buildkit/util/stack/stack.proto @@ -11,6 +11,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -21,15 +22,14 @@ const ( ) type Stack struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Frames []*Frame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"` + Cmdline []string `protobuf:"bytes,2,rep,name=cmdline,proto3" json:"cmdline,omitempty"` + Pid int32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + Revision string `protobuf:"bytes,5,opt,name=revision,proto3" json:"revision,omitempty"` unknownFields protoimpl.UnknownFields - - Frames []*Frame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"` - Cmdline []string `protobuf:"bytes,2,rep,name=cmdline,proto3" json:"cmdline,omitempty"` - Pid int32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` - Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` - Revision string `protobuf:"bytes,5,opt,name=revision,proto3" json:"revision,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Stack) Reset() { @@ -98,13 +98,12 @@ func (x *Stack) GetRevision() string { } type Frame struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + File string `protobuf:"bytes,2,opt,name=File,proto3" json:"File,omitempty"` + Line int32 `protobuf:"varint,3,opt,name=Line,proto3" json:"Line,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - File string `protobuf:"bytes,2,opt,name=File,proto3" json:"File,omitempty"` - Line int32 `protobuf:"varint,3,opt,name=Line,proto3" json:"Line,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Frame) Reset() { @@ -160,37 +159,28 @@ func (x *Frame) GetLine() int32 { var File_github_com_moby_buildkit_util_stack_stack_proto protoreflect.FileDescriptor -var file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, 0x62, - 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x75, 0x74, 0x69, 0x6c, 0x2f, - 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2f, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x05, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x22, 0x8f, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, - 0x63, 0x6b, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x2e, 0x46, 0x72, 0x61, 0x6d, 0x65, - 0x52, 0x06, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x63, 
0x6d, 0x64, 0x6c, - 0x69, 0x6e, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6d, 0x64, 0x6c, 0x69, - 0x6e, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x03, 0x70, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, - 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x43, 0x0a, 0x05, 0x46, 0x72, - 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4c, - 0x69, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x4c, 0x69, 0x6e, 0x65, 0x42, - 0x25, 0x5a, 0x23, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6d, 0x6f, - 0x62, 0x79, 0x2f, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x6b, 0x69, 0x74, 0x2f, 0x75, 0x74, 0x69, 0x6c, - 0x2f, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc = "" + + "\n" + + "/github.com/moby/buildkit/util/stack/stack.proto\x12\x05stack\"\x8f\x01\n" + + "\x05Stack\x12$\n" + + "\x06frames\x18\x01 \x03(\v2\f.stack.FrameR\x06frames\x12\x18\n" + + "\acmdline\x18\x02 \x03(\tR\acmdline\x12\x10\n" + + "\x03pid\x18\x03 \x01(\x05R\x03pid\x12\x18\n" + + "\aversion\x18\x04 \x01(\tR\aversion\x12\x1a\n" + + "\brevision\x18\x05 \x01(\tR\brevision\"C\n" + + "\x05Frame\x12\x12\n" + + "\x04Name\x18\x01 \x01(\tR\x04Name\x12\x12\n" + + "\x04File\x18\x02 \x01(\tR\x04File\x12\x12\n" + + "\x04Line\x18\x03 \x01(\x05R\x04LineB%Z#github.com/moby/buildkit/util/stackb\x06proto3" var ( file_github_com_moby_buildkit_util_stack_stack_proto_rawDescOnce sync.Once - file_github_com_moby_buildkit_util_stack_stack_proto_rawDescData = file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc + file_github_com_moby_buildkit_util_stack_stack_proto_rawDescData []byte ) func file_github_com_moby_buildkit_util_stack_stack_proto_rawDescGZIP() []byte { file_github_com_moby_buildkit_util_stack_stack_proto_rawDescOnce.Do(func() { - file_github_com_moby_buildkit_util_stack_stack_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_moby_buildkit_util_stack_stack_proto_rawDescData) + file_github_com_moby_buildkit_util_stack_stack_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc), len(file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc))) }) return file_github_com_moby_buildkit_util_stack_stack_proto_rawDescData } @@ -218,7 +208,7 @@ func file_github_com_moby_buildkit_util_stack_stack_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc), len(file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc)), NumEnums: 0, NumMessages: 2, NumExtensions: 0, @@ -229,7 +219,6 @@ func file_github_com_moby_buildkit_util_stack_stack_proto_init() { MessageInfos: 
file_github_com_moby_buildkit_util_stack_stack_proto_msgTypes, }.Build() File_github_com_moby_buildkit_util_stack_stack_proto = out.File - file_github_com_moby_buildkit_util_stack_stack_proto_rawDesc = nil file_github_com_moby_buildkit_util_stack_stack_proto_goTypes = nil file_github_com_moby_buildkit_util_stack_stack_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/system/path.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/system/path.go index 14f02b495..fdd410386 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/system/path.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/system/path.go @@ -2,7 +2,6 @@ package system import ( "path" - "path/filepath" "strings" "github.com/pkg/errors" @@ -46,7 +45,7 @@ func NormalizePath(parent, newPath, inputOS string, keepSlash bool) (string, err } var err error - parent, err = CheckSystemDriveAndRemoveDriveLetter(parent, inputOS) + parent, err = CheckSystemDriveAndRemoveDriveLetter(parent, inputOS, keepSlash) if err != nil { return "", errors.Wrap(err, "removing drive letter") } @@ -61,7 +60,7 @@ func NormalizePath(parent, newPath, inputOS string, keepSlash bool) (string, err newPath = parent } - newPath, err = CheckSystemDriveAndRemoveDriveLetter(newPath, inputOS) + newPath, err = CheckSystemDriveAndRemoveDriveLetter(newPath, inputOS, keepSlash) if err != nil { return "", errors.Wrap(err, "removing drive letter") } @@ -137,7 +136,7 @@ func IsAbs(pth, inputOS string) bool { if inputOS == "" { inputOS = "linux" } - cleanedPath, err := CheckSystemDriveAndRemoveDriveLetter(pth, inputOS) + cleanedPath, err := CheckSystemDriveAndRemoveDriveLetter(pth, inputOS, false) if err != nil { return false } @@ -174,7 +173,7 @@ func IsAbs(pth, inputOS string) bool { // There is no sane way to support this without adding a lot of complexity // which I am not sure is worth it. // \\.\C$\a --> Fail -func CheckSystemDriveAndRemoveDriveLetter(path string, inputOS string) (string, error) { +func CheckSystemDriveAndRemoveDriveLetter(path string, inputOS string, keepSlash bool) (string, error) { if inputOS == "" { inputOS = "linux" } @@ -193,9 +192,10 @@ func CheckSystemDriveAndRemoveDriveLetter(path string, inputOS string) (string, } parts := strings.SplitN(path, ":", 2) + // Path does not have a drive letter. Just return it. if len(parts) < 2 { - return ToSlash(filepath.Clean(path), inputOS), nil + return ToSlash(cleanPath(path, inputOS, keepSlash), inputOS), nil } // We expect all paths to be in C: @@ -220,5 +220,30 @@ func CheckSystemDriveAndRemoveDriveLetter(path string, inputOS string) (string, // // We must return the second element of the split path, as is, without attempting to convert // it to an absolute path. We have no knowledge of the CWD; that is treated elsewhere. - return ToSlash(filepath.Clean(parts[1]), inputOS), nil + return ToSlash(cleanPath(parts[1], inputOS, keepSlash), inputOS), nil +} + +// An adaptation of filepath.Clean to allow an option to +// retain the trailing slash, on either of the platforms. +// See https://github.com/moby/buildkit/issues/5249 +func cleanPath(origPath, inputOS string, keepSlash bool) string { + // so as to handle cases like \\a\\b\\..\\c\\ + // on Linux, when inputOS is Windows + origPath = ToSlash(origPath, inputOS) + + if !keepSlash { + return path.Clean(origPath) + } + + cleanedPath := path.Clean(origPath) + // Windows supports both \\ and / as path separator. 
+ hasTrailingSlash := strings.HasSuffix(origPath, "/") + if inputOS == "windows" { + hasTrailingSlash = hasTrailingSlash || strings.HasSuffix(origPath, "\\") + } + + if len(cleanedPath) > 1 && hasTrailingSlash { + return cleanedPath + "/" + } + return cleanedPath } diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/system/path_unix.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/system/path_unix.go new file mode 100644 index 000000000..55391783f --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/system/path_unix.go @@ -0,0 +1,17 @@ +//go:build !windows + +package system + +import "path/filepath" + +// IsAbsolutePath is just a wrapper that calls filepath.IsAbs. +// Has been added here just for symmetry with Windows. +func IsAbsolutePath(path string) bool { + return filepath.IsAbs(path) +} + +// GetAbsolutePath does nothing on non-Windows, just returns +// the same path. +func GetAbsolutePath(path string) string { + return path +} diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/system/path_windows.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/system/path_windows.go new file mode 100644 index 000000000..726d1f2dd --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/system/path_windows.go @@ -0,0 +1,29 @@ +package system + +import ( + "path/filepath" + "strings" +) + +// DefaultSystemVolumeName is the default system volume label on Windows +const DefaultSystemVolumeName = "C:" + +// IsAbsolutePath prepends the default system volume label +// to the path that is presumed absolute, and then calls filepath.IsAbs +func IsAbsolutePath(path string) bool { + path = filepath.Clean(path) + if strings.HasPrefix(path, "\\") { + path = DefaultSystemVolumeName + path + } + return filepath.IsAbs(path) +} + +// GetAbsolutePath returns an absolute path rooted +// to C:\\ on Windows. 
+func GetAbsolutePath(path string) string { + path = filepath.Clean(path) + if len(path) >= 2 && strings.EqualFold(path[:2], DefaultSystemVolumeName) { + return path + } + return DefaultSystemVolumeName + path +} diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/client.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/client.go index 5e15772d5..843e82613 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/client.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/client.go @@ -72,7 +72,7 @@ func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.Resourc ctx, cancel := c.connection.ContextWithStop(ctx) defer func() { cancel(errors.WithStack(context.Canceled)) }() ctx, tCancel := context.WithCancelCause(ctx) - ctx, _ = context.WithTimeoutCause(ctx, 30*time.Second, errors.WithStack(context.DeadlineExceeded)) + ctx, _ = context.WithTimeoutCause(ctx, 30*time.Second, errors.WithStack(context.DeadlineExceeded)) //nolint:govet defer tCancel(errors.WithStack(context.Canceled)) ctx = c.connection.ContextWithMetadata(ctx) diff --git a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/tracing/tracing.go b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/tracing/tracing.go index 2df1c4627..0d4fed97f 100644 --- a/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/tracing/tracing.go +++ b/src/cmd/linuxkit/vendor/github.com/moby/buildkit/util/tracing/tracing.go @@ -6,6 +6,9 @@ import ( "net/http" "net/http/httptrace" + "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/stack" + "github.com/pkg/errors" "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/otel/attribute" @@ -14,11 +17,6 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.26.0" "go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace/noop" - - "github.com/pkg/errors" - - "github.com/moby/buildkit/util/bklog" - "github.com/moby/buildkit/util/stack" ) // StartSpan starts a new span as a child of the span in context. 
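(Illustrative aside, not part of the diff.) The hasStacktrace rewrite in the next hunk collapses a recursive type switch into two errors.As probes; errors.As accepts a pointer to an interface type, so it can walk the wrap chain looking for any value that implements a given method set. A minimal sketch with a hypothetical interface:

package main

import (
	"errors"
	"fmt"
)

type withTrace struct{ msg string }

func (e *withTrace) Error() string      { return e.msg }
func (e *withTrace) StackTrace() string { return "frame 0\nframe 1" }

func main() {
	err := fmt.Errorf("outer: %w", &withTrace{msg: "boom"})

	// The target is a pointer to an interface type: errors.As unwraps err
	// until it finds a value whose type satisfies the interface.
	var tracer interface{ StackTrace() string }
	if errors.As(err, &tracer) {
		fmt.Println(tracer.StackTrace())
	}
}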
@@ -35,21 +33,9 @@ func StartSpan(ctx context.Context, operationName string, opts ...trace.SpanStar } func hasStacktrace(err error) bool { - switch e := err.(type) { - case interface{ StackTrace() *stack.Stack }: - return true - case interface{ StackTrace() errors.StackTrace }: - return true - case interface{ Unwrap() error }: - return hasStacktrace(e.Unwrap()) - case interface{ Unwrap() []error }: - for _, ue := range e.Unwrap() { - if hasStacktrace(ue) { - return true - } - } - } - return false + var stack interface{ StackTrace() *stack.Stack } + var pkgStack interface{ StackTrace() errors.StackTrace } + return errors.As(err, &stack) || errors.As(err, &pkgStack) } // FinishWithError finalizes the span and sets the error if one is passed diff --git a/src/cmd/linuxkit/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/src/cmd/linuxkit/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 7069ae44d..c3897c7ca 100644 --- a/src/cmd/linuxkit/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/src/cmd/linuxkit/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -22,7 +22,7 @@ const ( // VersionMinor is for functionality in a backwards-compatible manner VersionMinor = 1 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "" diff --git a/src/cmd/linuxkit/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go b/src/cmd/linuxkit/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go index d1236ba72..1aa0693b5 100644 --- a/src/cmd/linuxkit/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go +++ b/src/cmd/linuxkit/vendor/github.com/opencontainers/runtime-spec/specs-go/config.go @@ -83,7 +83,7 @@ type Process struct { // Rlimits specifies rlimit options to apply to the process. Rlimits []POSIXRlimit `json:"rlimits,omitempty" platform:"linux,solaris,zos"` // NoNewPrivileges controls whether additional privileges could be gained by processes in the container. - NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux"` + NoNewPrivileges bool `json:"noNewPrivileges,omitempty" platform:"linux,zos"` // ApparmorProfile specifies the apparmor profile for the container. ApparmorProfile string `json:"apparmorProfile,omitempty" platform:"linux"` // Specify an oom_score_adj for the container. @@ -94,10 +94,12 @@ type Process struct { SelinuxLabel string `json:"selinuxLabel,omitempty" platform:"linux"` // IOPriority contains the I/O priority settings for the cgroup. IOPriority *LinuxIOPriority `json:"ioPriority,omitempty" platform:"linux"` + // ExecCPUAffinity specifies CPU affinity for exec processes. + ExecCPUAffinity *CPUAffinity `json:"execCPUAffinity,omitempty" platform:"linux"` } // LinuxCapabilities specifies the list of allowed capabilities that are kept for a process. -// http://man7.org/linux/man-pages/man7/capabilities.7.html +// https://man7.org/linux/man-pages/man7/capabilities.7.html type LinuxCapabilities struct { // Bounding is the set of capabilities checked by the kernel. Bounding []string `json:"bounding,omitempty" platform:"linux"` @@ -127,6 +129,12 @@ const ( IOPRIO_CLASS_IDLE IOPriorityClass = "IOPRIO_CLASS_IDLE" ) +// CPUAffinity specifies process' CPU affinity. +type CPUAffinity struct { + Initial string `json:"initial,omitempty"` + Final string `json:"final,omitempty"` +} + // Box specifies dimensions of a rectangle. 
Used for specifying the size of a console. type Box struct { // Height is the vertical dimension of a box. @@ -627,6 +635,17 @@ type WindowsCPUResources struct { // cycles per 10,000 cycles. Set processor `maximum` to a percentage times // 100. Maximum *uint16 `json:"maximum,omitempty"` + // Set of CPUs to affinitize for this container. + Affinity []WindowsCPUGroupAffinity `json:"affinity,omitempty"` +} + +// Similar to _GROUP_AFFINITY struct defined in +// https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/miniport/ns-miniport-_group_affinity +type WindowsCPUGroupAffinity struct { + // CPU mask relative to this CPU group. + Mask uint64 `json:"mask,omitempty"` + // Processor group the mask refers to, as returned by GetLogicalProcessorInformationEx. + Group uint32 `json:"group,omitempty"` } // WindowsStorageResources contains storage resource management settings. @@ -751,6 +770,10 @@ const ( ArchPARISC Arch = "SCMP_ARCH_PARISC" ArchPARISC64 Arch = "SCMP_ARCH_PARISC64" ArchRISCV64 Arch = "SCMP_ARCH_RISCV64" + ArchLOONGARCH64 Arch = "SCMP_ARCH_LOONGARCH64" + ArchM68K Arch = "SCMP_ARCH_M68K" + ArchSH Arch = "SCMP_ARCH_SH" + ArchSHEB Arch = "SCMP_ARCH_SHEB" ) // LinuxSeccompAction taken upon Seccomp rule match @@ -826,28 +849,33 @@ type LinuxIntelRdt struct { // ZOS contains platform-specific configuration for z/OS based containers. type ZOS struct { - // Devices are a list of device nodes that are created for the container - Devices []ZOSDevice `json:"devices,omitempty"` + // Namespaces contains the namespaces that are created and/or joined by the container + Namespaces []ZOSNamespace `json:"namespaces,omitempty"` } -// ZOSDevice represents the mknod information for a z/OS special device file -type ZOSDevice struct { - // Path to the device. - Path string `json:"path"` - // Device type, block, char, etc. - Type string `json:"type"` - // Major is the device's major number. - Major int64 `json:"major"` - // Minor is the device's minor number. - Minor int64 `json:"minor"` - // FileMode permission bits for the device. - FileMode *os.FileMode `json:"fileMode,omitempty"` - // UID of the device. - UID *uint32 `json:"uid,omitempty"` - // Gid of the device. 
- GID *uint32 `json:"gid,omitempty"` +// ZOSNamespace is the configuration for a z/OS namespace +type ZOSNamespace struct { + // Type is the type of namespace + Type ZOSNamespaceType `json:"type"` + // Path is a path to an existing namespace persisted on disk that can be joined + // and is of the same type + Path string `json:"path,omitempty"` } +// ZOSNamespaceType is one of the z/OS namespaces +type ZOSNamespaceType string + +const ( + // PIDNamespace for isolating process IDs + ZOSPIDNamespace ZOSNamespaceType = "pid" + // MountNamespace for isolating mount points + ZOSMountNamespace ZOSNamespaceType = "mount" + // IPCNamespace for isolating System V IPC, POSIX message queues + ZOSIPCNamespace ZOSNamespaceType = "ipc" + // UTSNamespace for isolating hostname and NIS domain name + ZOSUTSNamespace ZOSNamespaceType = "uts" +) + // LinuxSchedulerPolicy represents different scheduling policies used with the Linux Scheduler type LinuxSchedulerPolicy string diff --git a/src/cmd/linuxkit/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/src/cmd/linuxkit/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go index 503971e05..23234a9c5 100644 --- a/src/cmd/linuxkit/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go +++ b/src/cmd/linuxkit/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go @@ -8,7 +8,7 @@ const ( // VersionMinor is for functionality in a backwards-compatible manner VersionMinor = 2 // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 + VersionPatch = 1 // VersionDev indicates development branch. Releases will be empty string. VersionDev = "" diff --git a/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go index fb1d5918b..abc860a49 100644 --- a/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go +++ b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/cjson/canonicaljson.go @@ -6,8 +6,8 @@ import ( "errors" "fmt" "reflect" - "regexp" "sort" + "strings" ) /* @@ -18,8 +18,12 @@ escaping backslashes ("\") and double quotes (") and wrapping the resulting string in double quotes ("). */ func encodeCanonicalString(s string) string { - re := regexp.MustCompile(`([\"\\])`) - return fmt.Sprintf("\"%s\"", re.ReplaceAllString(s, "\\$1")) + // Escape backslashes + s = strings.ReplaceAll(s, "\\", "\\\\") + // Escape double quotes + s = strings.ReplaceAll(s, "\"", "\\\"") + // Wrap with double quotes + return fmt.Sprintf("\"%s\"", s) } /* @@ -28,16 +32,7 @@ object according to the OLPC canonical JSON specification (see http://wiki.laptop.org/go/Canonical_JSON) and write it to the passed *bytes.Buffer. If canonicalization fails it returns an error. */ -func encodeCanonical(obj interface{}, result *bytes.Buffer) (err error) { - // Since this function is called recursively, we use panic if an error occurs - // and recover in a deferred function, which is always called before - // returning. There we set the error that is returned eventually. 
- defer func() { - if r := recover(); r != nil { - err = errors.New(r.(string)) - } - }() - +func encodeCanonical(obj interface{}, result *strings.Builder) (err error) { switch objAsserted := obj.(type) { case string: result.WriteString(encodeCanonicalString(objAsserted)) @@ -90,10 +85,9 @@ func encodeCanonical(obj interface{}, result *bytes.Buffer) (err error) { // Canonicalize map for i, key := range mapKeys { - // Note: `key` must be a `string` (see `case map[string]interface{}`) and - // canonicalization of strings cannot err out (see `case string`), thus - // no error handling is needed here. - encodeCanonical(key, result) + if err := encodeCanonical(key, result); err != nil { + return err + } result.WriteString(":") if err := encodeCanonical(objAsserted[key], result); err != nil { @@ -120,7 +114,16 @@ slice. It uses the OLPC canonical JSON specification (see http://wiki.laptop.org/go/Canonical_JSON). If canonicalization fails the byte slice is nil and the second return value contains the error. */ -func EncodeCanonical(obj interface{}) ([]byte, error) { +func EncodeCanonical(obj interface{}) (out []byte, err error) { + // We use panic if an error occurs and recover in a deferred function, + // which is always called before returning. + // There we set the error that is returned eventually. + defer func() { + if r := recover(); r != nil { + err = errors.New(r.(string)) + } + }() + // FIXME: Terrible hack to turn the passed struct into a map, converting // the struct's variable names to the json key names defined in the struct data, err := json.Marshal(obj) @@ -136,10 +139,13 @@ func EncodeCanonical(obj interface{}) ([]byte, error) { } // Create a buffer and write the canonicalized JSON bytes to it - var result bytes.Buffer + var result strings.Builder + // Allocate output result buffer with the input size. + result.Grow(len(data)) + // Recursively encode the jsonmap if err := encodeCanonical(jsonMap, &result); err != nil { return nil, err } - return result.Bytes(), nil + return []byte(result.String()), nil } diff --git a/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/envelope.go b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/envelope.go new file mode 100644 index 000000000..ed223e90b --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/envelope.go @@ -0,0 +1,64 @@ +package dsse + +import ( + "encoding/base64" + "fmt" +) + +/* +Envelope captures an envelope as described by the DSSE specification. See here: +https://github.com/secure-systems-lab/dsse/blob/master/envelope.md +*/ +type Envelope struct { + PayloadType string `json:"payloadType"` + Payload string `json:"payload"` + Signatures []Signature `json:"signatures"` +} + +/* +DecodeB64Payload returns the serialized body, decoded from the envelope's +payload field. A flexible decoder is used, first trying standard base64, then +URL-encoded base64. +*/ +func (e *Envelope) DecodeB64Payload() ([]byte, error) { + return b64Decode(e.Payload) +} + +/* +Signature represents a generic in-toto signature that contains the identifier +of the key which was used to create the signature. +The used signature scheme has to be agreed upon by the signer and verifer +out of band. +The signature is a base64 encoding of the raw bytes from the signature +algorithm. 
+*/ +type Signature struct { + KeyID string `json:"keyid"` + Sig string `json:"sig"` +} + +/* +PAE implementes the DSSE Pre-Authentic Encoding +https://github.com/secure-systems-lab/dsse/blob/master/protocol.md#signature-definition +*/ +func PAE(payloadType string, payload []byte) []byte { + return []byte(fmt.Sprintf("DSSEv1 %d %s %d %s", + len(payloadType), payloadType, + len(payload), payload)) +} + +/* +Both standard and url encoding are allowed: +https://github.com/secure-systems-lab/dsse/blob/master/envelope.md +*/ +func b64Decode(s string) ([]byte, error) { + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + b, err = base64.URLEncoding.DecodeString(s) + if err != nil { + return nil, fmt.Errorf("unable to base64 decode payload (is payload in the right format?)") + } + } + + return b, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go index 3dc05a429..85aed102d 100644 --- a/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go +++ b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/sign.go @@ -5,111 +5,35 @@ https://github.com/secure-systems-lab/dsse package dsse import ( + "context" "encoding/base64" "errors" - "fmt" ) -// ErrUnknownKey indicates that the implementation does not recognize the -// key. -var ErrUnknownKey = errors.New("unknown key") - -// ErrNoSignature indicates that an envelope did not contain any signatures. -var ErrNoSignature = errors.New("no signature found") - // ErrNoSigners indicates that no signer was provided. var ErrNoSigners = errors.New("no signers provided") -/* -Envelope captures an envelope as described by the Secure Systems Lab -Signing Specification. See here: -https://github.com/secure-systems-lab/signing-spec/blob/master/envelope.md -*/ -type Envelope struct { - PayloadType string `json:"payloadType"` - Payload string `json:"payload"` - Signatures []Signature `json:"signatures"` -} - -/* -DecodeB64Payload returns the serialized body, decoded -from the envelope's payload field. A flexible -decoder is used, first trying standard base64, then -URL-encoded base64. -*/ -func (e *Envelope) DecodeB64Payload() ([]byte, error) { - return b64Decode(e.Payload) -} - -/* -Signature represents a generic in-toto signature that contains the identifier -of the key which was used to create the signature. -The used signature scheme has to be agreed upon by the signer and verifer -out of band. -The signature is a base64 encoding of the raw bytes from the signature -algorithm. -*/ -type Signature struct { - KeyID string `json:"keyid"` - Sig string `json:"sig"` -} - -/* -PAE implementes the DSSE Pre-Authentic Encoding -https://github.com/secure-systems-lab/dsse/blob/master/protocol.md#signature-definition -*/ -func PAE(payloadType string, payload []byte) []byte { - return []byte(fmt.Sprintf("DSSEv1 %d %s %d %s", - len(payloadType), payloadType, - len(payload), payload)) -} - -/* -Signer defines the interface for an abstract signing algorithm. -The Signer interface is used to inject signature algorithm implementations -into the EnevelopeSigner. This decoupling allows for any signing algorithm -and key management system can be used. -The full message is provided as the parameter. If the signature algorithm -depends on hashing of the message prior to signature calculation, the -implementor of this interface must perform such hashing. 
-The function must return raw bytes representing the calculated signature -using the current algorithm, and the key used (if applicable). -For an example see EcdsaSigner in sign_test.go. -*/ -type Signer interface { - Sign(data []byte) ([]byte, error) - KeyID() (string, error) -} - -// SignVerifer provides both the signing and verification interface. -type SignVerifier interface { - Signer - Verifier -} - // EnvelopeSigner creates signed Envelopes. type EnvelopeSigner struct { - providers []SignVerifier - ev *EnvelopeVerifier + providers []SignerVerifier } /* -NewEnvelopeSigner creates an EnvelopeSigner that uses 1+ Signer -algorithms to sign the data. -Creates a verifier with threshold=1, at least one of the providers must validate signitures successfully. +NewEnvelopeSigner creates an EnvelopeSigner that uses 1+ Signer algorithms to +sign the data. Creates a verifier with threshold=1, at least one of the +providers must validate signatures successfully. */ -func NewEnvelopeSigner(p ...SignVerifier) (*EnvelopeSigner, error) { +func NewEnvelopeSigner(p ...SignerVerifier) (*EnvelopeSigner, error) { return NewMultiEnvelopeSigner(1, p...) } /* NewMultiEnvelopeSigner creates an EnvelopeSigner that uses 1+ Signer -algorithms to sign the data. -Creates a verifier with threshold. -threashold indicates the amount of providers that must validate the envelope. +algorithms to sign the data. Creates a verifier with threshold. Threshold +indicates the amount of providers that must validate the envelope. */ -func NewMultiEnvelopeSigner(threshold int, p ...SignVerifier) (*EnvelopeSigner, error) { - var providers []SignVerifier +func NewMultiEnvelopeSigner(threshold int, p ...SignerVerifier) (*EnvelopeSigner, error) { + var providers []SignerVerifier for _, sv := range p { if sv != nil { @@ -121,19 +45,8 @@ func NewMultiEnvelopeSigner(threshold int, p ...SignVerifier) (*EnvelopeSigner, return nil, ErrNoSigners } - evps := []Verifier{} - for _, p := range providers { - evps = append(evps, p.(Verifier)) - } - - ev, err := NewMultiEnvelopeVerifier(threshold, evps...) - if err != nil { - return nil, err - } - return &EnvelopeSigner{ providers: providers, - ev: ev, }, nil } @@ -143,7 +56,7 @@ Returned is an envelope as defined here: https://github.com/secure-systems-lab/dsse/blob/master/envelope.md One signature will be added for each Signer in the EnvelopeSigner. */ -func (es *EnvelopeSigner) SignPayload(payloadType string, body []byte) (*Envelope, error) { +func (es *EnvelopeSigner) SignPayload(ctx context.Context, payloadType string, body []byte) (*Envelope, error) { var e = Envelope{ Payload: base64.StdEncoding.EncodeToString(body), PayloadType: payloadType, @@ -152,7 +65,7 @@ func (es *EnvelopeSigner) SignPayload(payloadType string, body []byte) (*Envelop paeEnc := PAE(payloadType, body) for _, signer := range es.providers { - sig, err := signer.Sign(paeEnc) + sig, err := signer.Sign(ctx, paeEnc) if err != nil { return nil, err } @@ -169,29 +82,3 @@ func (es *EnvelopeSigner) SignPayload(payloadType string, body []byte) (*Envelop return &e, nil } - -/* -Verify decodes the payload and verifies the signature. -Any domain specific validation such as parsing the decoded body and -validating the payload type is left out to the caller. -Verify returns a list of accepted keys each including a keyid, public and signiture of the accepted provider keys. 
-*/ -func (es *EnvelopeSigner) Verify(e *Envelope) ([]AcceptedKey, error) { - return es.ev.Verify(e) -} - -/* -Both standard and url encoding are allowed: -https://github.com/secure-systems-lab/dsse/blob/master/envelope.md -*/ -func b64Decode(s string) ([]byte, error) { - b, err := base64.StdEncoding.DecodeString(s) - if err != nil { - b, err = base64.URLEncoding.DecodeString(s) - if err != nil { - return nil, err - } - } - - return b, nil -} diff --git a/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/signerverifier.go b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/signerverifier.go new file mode 100644 index 000000000..99d03c7df --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/signerverifier.go @@ -0,0 +1,43 @@ +package dsse + +import ( + "context" + "crypto" +) + +/* +Signer defines the interface for an abstract signing algorithm. The Signer +interface is used to inject signature algorithm implementations into the +EnvelopeSigner. This decoupling allows for any signing algorithm and key +management system can be used. The full message is provided as the parameter. +If the signature algorithm depends on hashing of the message prior to signature +calculation, the implementor of this interface must perform such hashing. The +function must return raw bytes representing the calculated signature using the +current algorithm, and the key used (if applicable). +*/ +type Signer interface { + Sign(ctx context.Context, data []byte) ([]byte, error) + KeyID() (string, error) +} + +/* +Verifier verifies a complete message against a signature and key. If the message +was hashed prior to signature generation, the verifier must perform the same +steps. If KeyID returns successfully, only signature matching the key ID will be +verified. +*/ +type Verifier interface { + Verify(ctx context.Context, data, sig []byte) error + KeyID() (string, error) + Public() crypto.PublicKey +} + +// SignerVerifier provides both the signing and verification interface. +type SignerVerifier interface { + Signer + Verifier +} + +// Deprecated: switch to renamed SignerVerifier. This is currently aliased for +// backwards compatibility. +type SignVerifier = SignerVerifier diff --git a/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go index ead1c32ca..a36146b82 100644 --- a/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go +++ b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/dsse/verify.go @@ -1,6 +1,7 @@ package dsse import ( + "context" "crypto" "errors" "fmt" @@ -8,17 +9,8 @@ import ( "golang.org/x/crypto/ssh" ) -/* -Verifier verifies a complete message against a signature and key. -If the message was hashed prior to signature generation, the verifier -must perform the same steps. -If KeyID returns successfully, only signature matching the key ID will be verified. -*/ -type Verifier interface { - Verify(data, sig []byte) error - KeyID() (string, error) - Public() crypto.PublicKey -} +// ErrNoSignature indicates that an envelope did not contain any signatures. 
+var ErrNoSignature = errors.New("no signature found") type EnvelopeVerifier struct { providers []Verifier @@ -31,7 +23,7 @@ type AcceptedKey struct { Sig Signature } -func (ev *EnvelopeVerifier) Verify(e *Envelope) ([]AcceptedKey, error) { +func (ev *EnvelopeVerifier) Verify(ctx context.Context, e *Envelope) ([]AcceptedKey, error) { if e == nil { return nil, errors.New("cannot verify a nil envelope") } @@ -78,7 +70,7 @@ func (ev *EnvelopeVerifier) Verify(e *Envelope) ([]AcceptedKey, error) { continue } - err = v.Verify(paeEnc, sig) + err = v.Verify(ctx, paeEnc, sig) if err != nil { continue } @@ -104,11 +96,11 @@ func (ev *EnvelopeVerifier) Verify(e *Envelope) ([]AcceptedKey, error) { // Sanity if with some reflect magic this happens. if ev.threshold <= 0 || ev.threshold > len(ev.providers) { - return nil, errors.New("Invalid threshold") + return nil, errors.New("invalid threshold") } if len(usedKeyids) < ev.threshold { - return acceptedKeys, errors.New(fmt.Sprintf("Accepted signatures do not match threshold, Found: %d, Expected %d", len(acceptedKeys), ev.threshold)) + return acceptedKeys, fmt.Errorf("accepted signatures do not match threshold, Found: %d, Expected %d", len(acceptedKeys), ev.threshold) } return acceptedKeys, nil @@ -119,15 +111,15 @@ func NewEnvelopeVerifier(v ...Verifier) (*EnvelopeVerifier, error) { } func NewMultiEnvelopeVerifier(threshold int, p ...Verifier) (*EnvelopeVerifier, error) { - if threshold <= 0 || threshold > len(p) { - return nil, errors.New("Invalid threshold") + return nil, errors.New("invalid threshold") } ev := EnvelopeVerifier{ providers: p, threshold: threshold, } + return &ev, nil } diff --git a/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go new file mode 100644 index 000000000..578d6a548 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ecdsa.go @@ -0,0 +1,111 @@ +package signerverifier + +import ( + "context" + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/sha256" + "crypto/sha512" + "fmt" + "os" +) + +const ECDSAKeyType = "ecdsa" + +// ECDSASignerVerifier is a dsse.SignerVerifier compliant interface to sign and +// verify signatures using ECDSA keys. +type ECDSASignerVerifier struct { + keyID string + curveSize int + private *ecdsa.PrivateKey + public *ecdsa.PublicKey +} + +// NewECDSASignerVerifierFromSSLibKey creates an ECDSASignerVerifier from an +// SSLibKey. +func NewECDSASignerVerifierFromSSLibKey(key *SSLibKey) (*ECDSASignerVerifier, error) { + if len(key.KeyVal.Public) == 0 { + return nil, ErrInvalidKey + } + + _, publicParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Public)) + if err != nil { + return nil, fmt.Errorf("unable to create ECDSA signerverifier: %w", err) + } + + sv := &ECDSASignerVerifier{ + keyID: key.KeyID, + curveSize: publicParsedKey.(*ecdsa.PublicKey).Params().BitSize, + public: publicParsedKey.(*ecdsa.PublicKey), + private: nil, + } + + if len(key.KeyVal.Private) > 0 { + _, privateParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Private)) + if err != nil { + return nil, fmt.Errorf("unable to create ECDSA signerverifier: %w", err) + } + + sv.private = privateParsedKey.(*ecdsa.PrivateKey) + } + + return sv, nil +} + +// Sign creates a signature for `data`. 
+func (sv *ECDSASignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, error) { + if sv.private == nil { + return nil, ErrNotPrivateKey + } + + hashedData := getECDSAHashedData(data, sv.curveSize) + + return ecdsa.SignASN1(rand.Reader, sv.private, hashedData) +} + +// Verify verifies the `sig` value passed in against `data`. +func (sv *ECDSASignerVerifier) Verify(ctx context.Context, data []byte, sig []byte) error { + hashedData := getECDSAHashedData(data, sv.curveSize) + + if ok := ecdsa.VerifyASN1(sv.public, hashedData, sig); !ok { + return ErrSignatureVerificationFailed + } + + return nil +} + +// KeyID returns the identifier of the key used to create the +// ECDSASignerVerifier instance. +func (sv *ECDSASignerVerifier) KeyID() (string, error) { + return sv.keyID, nil +} + +// Public returns the public portion of the key used to create the +// ECDSASignerVerifier instance. +func (sv *ECDSASignerVerifier) Public() crypto.PublicKey { + return sv.public +} + +// LoadECDSAKeyFromFile returns an SSLibKey instance for an ECDSA key stored in +// a file in the custom securesystemslib format. +func LoadECDSAKeyFromFile(path string) (*SSLibKey, error) { + contents, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("unable to load ECDSA key from file: %w", err) + } + + return loadKeyFromSSLibBytes(contents) +} + +func getECDSAHashedData(data []byte, curveSize int) []byte { + switch { + case curveSize <= 256: + return hashBeforeSigning(data, sha256.New()) + case 256 < curveSize && curveSize <= 384: + return hashBeforeSigning(data, sha512.New384()) + case curveSize > 384: + return hashBeforeSigning(data, sha512.New()) + } + return []byte{} +} diff --git a/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go new file mode 100644 index 000000000..c71d313a7 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/ed25519.go @@ -0,0 +1,98 @@ +package signerverifier + +import ( + "context" + "crypto" + "crypto/ed25519" + "encoding/hex" + "fmt" + "os" +) + +const ED25519KeyType = "ed25519" + +// ED25519SignerVerifier is a dsse.SignerVerifier compliant interface to sign +// and verify signatures using ED25519 keys. +type ED25519SignerVerifier struct { + keyID string + private ed25519.PrivateKey + public ed25519.PublicKey +} + +// NewED25519SignerVerifierFromSSLibKey creates an Ed25519SignerVerifier from an +// SSLibKey. +func NewED25519SignerVerifierFromSSLibKey(key *SSLibKey) (*ED25519SignerVerifier, error) { + if len(key.KeyVal.Public) == 0 { + return nil, ErrInvalidKey + } + + public, err := hex.DecodeString(key.KeyVal.Public) + if err != nil { + return nil, fmt.Errorf("unable to create ED25519 signerverifier: %w", err) + } + + var private []byte + if len(key.KeyVal.Private) > 0 { + private, err = hex.DecodeString(key.KeyVal.Private) + if err != nil { + return nil, fmt.Errorf("unable to create ED25519 signerverifier: %w", err) + } + + // python-securesystemslib provides an interface to generate ed25519 + // keys but it differs slightly in how it serializes the key to disk. + // Specifically, the keyval.private field includes _only_ the private + // portion of the key while libraries such as crypto/ed25519 also expect + // the public portion. So, if the private portion is half of what we + // expect, we append the public portion as well. 
+ if len(private) == ed25519.PrivateKeySize/2 { + private = append(private, public...) + } + } + + return &ED25519SignerVerifier{ + keyID: key.KeyID, + public: ed25519.PublicKey(public), + private: ed25519.PrivateKey(private), + }, nil +} + +// Sign creates a signature for `data`. +func (sv *ED25519SignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, error) { + if len(sv.private) == 0 { + return nil, ErrNotPrivateKey + } + + signature := ed25519.Sign(sv.private, data) + return signature, nil +} + +// Verify verifies the `sig` value passed in against `data`. +func (sv *ED25519SignerVerifier) Verify(ctx context.Context, data []byte, sig []byte) error { + if ok := ed25519.Verify(sv.public, data, sig); ok { + return nil + } + return ErrSignatureVerificationFailed +} + +// KeyID returns the identifier of the key used to create the +// ED25519SignerVerifier instance. +func (sv *ED25519SignerVerifier) KeyID() (string, error) { + return sv.keyID, nil +} + +// Public returns the public portion of the key used to create the +// ED25519SignerVerifier instance. +func (sv *ED25519SignerVerifier) Public() crypto.PublicKey { + return sv.public +} + +// LoadED25519KeyFromFile returns an SSLibKey instance for an ED25519 key stored +// in a file in the custom securesystemslib format. +func LoadED25519KeyFromFile(path string) (*SSLibKey, error) { + contents, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("unable to load ED25519 key from file: %w", err) + } + + return loadKeyFromSSLibBytes(contents) +} diff --git a/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go new file mode 100644 index 000000000..3612f28a4 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/rsa.go @@ -0,0 +1,141 @@ +package signerverifier + +import ( + "context" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "fmt" + "os" + "strings" +) + +const ( + RSAKeyType = "rsa" + RSAKeyScheme = "rsassa-pss-sha256" + RSAPrivateKeyPEM = "RSA PRIVATE KEY" +) + +// RSAPSSSignerVerifier is a dsse.SignerVerifier compliant interface to sign and +// verify signatures using RSA keys following the RSA-PSS scheme. +type RSAPSSSignerVerifier struct { + keyID string + private *rsa.PrivateKey + public *rsa.PublicKey +} + +// NewRSAPSSSignerVerifierFromSSLibKey creates an RSAPSSSignerVerifier from an +// SSLibKey. +func NewRSAPSSSignerVerifierFromSSLibKey(key *SSLibKey) (*RSAPSSSignerVerifier, error) { + if len(key.KeyVal.Public) == 0 { + return nil, ErrInvalidKey + } + + _, publicParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Public)) + if err != nil { + return nil, fmt.Errorf("unable to create RSA-PSS signerverifier: %w", err) + } + + if len(key.KeyVal.Private) > 0 { + _, privateParsedKey, err := decodeAndParsePEM([]byte(key.KeyVal.Private)) + if err != nil { + return nil, fmt.Errorf("unable to create RSA-PSS signerverifier: %w", err) + } + + return &RSAPSSSignerVerifier{ + keyID: key.KeyID, + public: publicParsedKey.(*rsa.PublicKey), + private: privateParsedKey.(*rsa.PrivateKey), + }, nil + } + + return &RSAPSSSignerVerifier{ + keyID: key.KeyID, + public: publicParsedKey.(*rsa.PublicKey), + private: nil, + }, nil +} + +// Sign creates a signature for `data`. 
+func (sv *RSAPSSSignerVerifier) Sign(ctx context.Context, data []byte) ([]byte, error) { + if sv.private == nil { + return nil, ErrNotPrivateKey + } + + hashedData := hashBeforeSigning(data, sha256.New()) + + return rsa.SignPSS(rand.Reader, sv.private, crypto.SHA256, hashedData, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}) +} + +// Verify verifies the `sig` value passed in against `data`. +func (sv *RSAPSSSignerVerifier) Verify(ctx context.Context, data []byte, sig []byte) error { + hashedData := hashBeforeSigning(data, sha256.New()) + + if err := rsa.VerifyPSS(sv.public, crypto.SHA256, hashedData, sig, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256}); err != nil { + return ErrSignatureVerificationFailed + } + + return nil +} + +// KeyID returns the identifier of the key used to create the +// RSAPSSSignerVerifier instance. +func (sv *RSAPSSSignerVerifier) KeyID() (string, error) { + return sv.keyID, nil +} + +// Public returns the public portion of the key used to create the +// RSAPSSSignerVerifier instance. +func (sv *RSAPSSSignerVerifier) Public() crypto.PublicKey { + return sv.public +} + +// LoadRSAPSSKeyFromFile returns an SSLibKey instance for an RSA key stored in a +// file. +func LoadRSAPSSKeyFromFile(path string) (*SSLibKey, error) { + contents, err := os.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + + pemData, keyObj, err := decodeAndParsePEM(contents) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + + key := &SSLibKey{ + KeyType: RSAKeyType, + Scheme: RSAKeyScheme, + KeyIDHashAlgorithms: KeyIDHashAlgorithms, + KeyVal: KeyVal{}, + } + + switch k := keyObj.(type) { + case *rsa.PublicKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(k) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + key.KeyVal.Public = strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, PublicKeyPEM))) + + case *rsa.PrivateKey: + pubKeyBytes, err := x509.MarshalPKIXPublicKey(k.Public()) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + key.KeyVal.Public = strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, PublicKeyPEM))) + key.KeyVal.Private = strings.TrimSpace(string(generatePEMBlock(pemData.Bytes, RSAPrivateKeyPEM))) + } + + if len(key.KeyID) == 0 { + keyID, err := calculateKeyID(key) + if err != nil { + return nil, fmt.Errorf("unable to load RSA key from file: %w", err) + } + key.KeyID = keyID + } + + return key, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go new file mode 100644 index 000000000..5f510f7be --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/signerverifier.go @@ -0,0 +1,34 @@ +package signerverifier + +import ( + "errors" +) + +var KeyIDHashAlgorithms = []string{"sha256", "sha512"} + +var ( + ErrNotPrivateKey = errors.New("loaded key is not a private key") + ErrSignatureVerificationFailed = errors.New("failed to verify signature") + ErrUnknownKeyType = errors.New("unknown key type") + ErrInvalidThreshold = errors.New("threshold is either less than 1 or greater than number of provided public keys") + ErrInvalidKey = errors.New("key object has no value") +) + +const ( + PublicKeyPEM = "PUBLIC KEY" + PrivateKeyPEM = 
"PRIVATE KEY" +) + +type SSLibKey struct { + KeyIDHashAlgorithms []string `json:"keyid_hash_algorithms"` + KeyType string `json:"keytype"` + KeyVal KeyVal `json:"keyval"` + Scheme string `json:"scheme"` + KeyID string `json:"keyid"` +} + +type KeyVal struct { + Private string `json:"private,omitempty"` + Public string `json:"public"` + Certificate string `json:"certificate,omitempty"` +} diff --git a/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go new file mode 100644 index 000000000..73aaa77d4 --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/secure-systems-lab/go-securesystemslib/signerverifier/utils.go @@ -0,0 +1,150 @@ +package signerverifier + +import ( + "crypto/sha256" + "crypto/x509" + "encoding/hex" + "encoding/json" + "encoding/pem" + "errors" + "hash" + "testing" + + "github.com/secure-systems-lab/go-securesystemslib/cjson" +) + +/* +Credits: Parts of this file were originally authored for in-toto-golang. +*/ + +var ( + // ErrNoPEMBlock gets triggered when there is no PEM block in the provided file + ErrNoPEMBlock = errors.New("failed to decode the data as PEM block (are you sure this is a pem file?)") + // ErrFailedPEMParsing gets returned when PKCS1, PKCS8 or PKIX key parsing fails + ErrFailedPEMParsing = errors.New("failed parsing the PEM block: unsupported PEM type") +) + +// loadKeyFromSSLibBytes returns a pointer to a Key instance created from the +// contents of the bytes. The key contents are expected to be in the custom +// securesystemslib format. +func loadKeyFromSSLibBytes(contents []byte) (*SSLibKey, error) { + var key *SSLibKey + if err := json.Unmarshal(contents, &key); err != nil { + return nil, err + } + + if len(key.KeyID) == 0 { + keyID, err := calculateKeyID(key) + if err != nil { + return nil, err + } + key.KeyID = keyID + } + + return key, nil +} + +func calculateKeyID(k *SSLibKey) (string, error) { + key := map[string]any{ + "keytype": k.KeyType, + "scheme": k.Scheme, + "keyid_hash_algorithms": k.KeyIDHashAlgorithms, + "keyval": map[string]string{ + "public": k.KeyVal.Public, + }, + } + canonical, err := cjson.EncodeCanonical(key) + if err != nil { + return "", err + } + digest := sha256.Sum256(canonical) + return hex.EncodeToString(digest[:]), nil +} + +/* +generatePEMBlock creates a PEM block from scratch via the keyBytes and the pemType. +If successful it returns a PEM block as []byte slice. This function should always +succeed, if keyBytes is empty the PEM block will have an empty byte block. +Therefore only header and footer will exist. +*/ +func generatePEMBlock(keyBytes []byte, pemType string) []byte { + // construct PEM block + pemBlock := &pem.Block{ + Type: pemType, + Headers: nil, + Bytes: keyBytes, + } + return pem.EncodeToMemory(pemBlock) +} + +/* +decodeAndParsePEM receives potential PEM bytes decodes them via pem.Decode +and pushes them to parseKey. If any error occurs during this process, +the function will return nil and an error (either ErrFailedPEMParsing +or ErrNoPEMBlock). On success it will return the decoded pemData, the +key object interface and nil as error. We need the decoded pemData, +because LoadKey relies on decoded pemData for operating system +interoperability. +*/ +func decodeAndParsePEM(pemBytes []byte) (*pem.Block, any, error) { + // pem.Decode returns the parsed pem block and a rest. + // The rest is everything, that could not be parsed as PEM block. 
+ // Therefore we can drop this via using the blank identifier "_" + data, _ := pem.Decode(pemBytes) + if data == nil { + return nil, nil, ErrNoPEMBlock + } + + // Try to load private key, if this fails try to load + // key as public key + key, err := parsePEMKey(data.Bytes) + if err != nil { + return nil, nil, err + } + return data, key, nil +} + +/* +parseKey tries to parse a PEM []byte slice. Using the following standards +in the given order: + + - PKCS8 + - PKCS1 + - PKIX + +On success it returns the parsed key and nil. +On failure it returns nil and the error ErrFailedPEMParsing +*/ +func parsePEMKey(data []byte) (any, error) { + key, err := x509.ParsePKCS8PrivateKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParsePKCS1PrivateKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParsePKIXPublicKey(data) + if err == nil { + return key, nil + } + key, err = x509.ParseECPrivateKey(data) + if err == nil { + return key, nil + } + return nil, ErrFailedPEMParsing +} + +func hashBeforeSigning(data []byte, h hash.Hash) []byte { + h.Write(data) + return h.Sum(nil) +} + +func hexDecode(t *testing.T, data string) []byte { + t.Helper() + b, err := hex.DecodeString(data) + if err != nil { + t.Fatal(err) + } + return b +} diff --git a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/json/marshal/json.go b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/json/marshal/json.go new file mode 100644 index 000000000..b5baefecc --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/json/marshal/json.go @@ -0,0 +1,15 @@ +package marshal + +import ( + "bytes" + "encoding/json" +) + +// JSON marshals the object _without_ escaping HTML +func JSON(obj interface{}) ([]byte, error) { + buf := &bytes.Buffer{} + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(obj) + return bytes.TrimSpace(buf.Bytes()), err +} diff --git a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/model.go b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/model.go index e91856b0e..7d1105ebf 100644 --- a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/model.go +++ b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/model.go @@ -85,6 +85,9 @@ const ( TypePersistentIdSwh = common.TypePersistentIdSwh TypePersistentIdGitoid = common.TypePersistentIdGitoid + // F.5 Other + CategoryOther = common.CategoryOther + // 11.1 Relationship field types RelationshipDescribes = common.TypeRelationshipDescribe RelationshipDescribedBy = common.TypeRelationshipDescribeBy diff --git a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/annotation.go b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/annotation.go index e77d7b780..645aa9249 100644 --- a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/annotation.go +++ b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/annotation.go @@ -3,9 +3,10 @@ package common import ( - "encoding/json" "fmt" "strings" + + "github.com/spdx/tools-golang/json/marshal" ) type Annotator struct { @@ -37,7 +38,7 @@ func (a *Annotator) UnmarshalJSON(data []byte) error { // This function is also used when marshalling to YAML func (a Annotator) MarshalJSON() ([]byte, error) { if a.Annotator != "" { - return json.Marshal(fmt.Sprintf("%s: %s", a.AnnotatorType, a.Annotator)) + return marshal.JSON(fmt.Sprintf("%s: %s", a.AnnotatorType, a.Annotator)) } return []byte{}, nil diff --git 
a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/creation_info.go b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/creation_info.go index c87ae7be9..77b24ee0d 100644 --- a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/creation_info.go +++ b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/creation_info.go @@ -3,9 +3,10 @@ package common import ( - "encoding/json" "fmt" "strings" + + "github.com/spdx/tools-golang/json/marshal" ) // Creator is a wrapper around the Creator SPDX field. The SPDX field contains two values, which requires special @@ -37,7 +38,7 @@ func (c *Creator) UnmarshalJSON(data []byte) error { // This function is also used with marshalling to YAML func (c Creator) MarshalJSON() ([]byte, error) { if c.Creator != "" { - return json.Marshal(fmt.Sprintf("%s: %s", c.CreatorType, c.Creator)) + return marshal.JSON(fmt.Sprintf("%s: %s", c.CreatorType, c.Creator)) } return []byte{}, nil diff --git a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/external.go b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/external.go index 8344ac616..2715815e7 100644 --- a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/external.go +++ b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/external.go @@ -21,11 +21,15 @@ const ( TypePackageManagerNuGet string = "nuget" TypePackageManagerBower string = "bower" TypePackageManagerPURL string = "purl" + // F.4 Persistent-Id types CategoryPersistentId string = "PERSISTENT-ID" TypePersistentIdSwh string = "swh" TypePersistentIdGitoid string = "gitoid" + // F.5 Other + CategoryOther string = "OTHER" + // 11.1 Relationship field types TypeRelationshipDescribe string = "DESCRIBES" TypeRelationshipDescribeBy string = "DESCRIBED_BY" diff --git a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/identifier.go b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/identifier.go index 806a8157e..929e42fa4 100644 --- a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/identifier.go +++ b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/identifier.go @@ -3,9 +3,10 @@ package common import ( - "encoding/json" "fmt" "strings" + + "github.com/spdx/tools-golang/json/marshal" ) const ( @@ -21,7 +22,7 @@ type ElementID string // MarshalJSON returns an SPDXRef- prefixed JSON string func (d ElementID) MarshalJSON() ([]byte, error) { - return json.Marshal(prefixElementId(d)) + return marshal.JSON(prefixElementId(d)) } // UnmarshalJSON validates SPDXRef- prefixes and removes them when processing ElementIDs @@ -85,11 +86,11 @@ type DocElementID struct { func (d DocElementID) MarshalJSON() ([]byte, error) { if d.DocumentRefID != "" && d.ElementRefID != "" { idStr := prefixElementId(d.ElementRefID) - return json.Marshal(fmt.Sprintf("%s%s:%s", documentRefPrefix, d.DocumentRefID, idStr)) + return marshal.JSON(fmt.Sprintf("%s%s:%s", documentRefPrefix, d.DocumentRefID, idStr)) } else if d.ElementRefID != "" { - return json.Marshal(prefixElementId(d.ElementRefID)) + return marshal.JSON(prefixElementId(d.ElementRefID)) } else if d.SpecialID != "" { - return json.Marshal(d.SpecialID) + return marshal.JSON(d.SpecialID) } return []byte{}, fmt.Errorf("failed to marshal empty DocElementID") diff --git a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/package.go b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/package.go 
index 349cef254..45c73677e 100644 --- a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/package.go +++ b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/common/package.go @@ -3,9 +3,10 @@ package common import ( - "encoding/json" "fmt" "strings" + + "github.com/spdx/tools-golang/json/marshal" ) type Supplier struct { @@ -43,9 +44,9 @@ func (s *Supplier) UnmarshalJSON(data []byte) error { // This function is also used when marshalling to YAML func (s Supplier) MarshalJSON() ([]byte, error) { if s.Supplier == "NOASSERTION" { - return json.Marshal(s.Supplier) + return marshal.JSON(s.Supplier) } else if s.SupplierType != "" && s.Supplier != "" { - return json.Marshal(fmt.Sprintf("%s: %s", s.SupplierType, s.Supplier)) + return marshal.JSON(fmt.Sprintf("%s: %s", s.SupplierType, s.Supplier)) } return []byte{}, fmt.Errorf("failed to marshal invalid Supplier: %+v", s) @@ -84,12 +85,12 @@ func (o *Originator) UnmarshalJSON(data []byte) error { // This function is also used when marshalling to YAML func (o Originator) MarshalJSON() ([]byte, error) { if o.Originator == "NOASSERTION" { - return json.Marshal(o.Originator) - } else if o.Originator != "" { - return json.Marshal(fmt.Sprintf("%s: %s", o.OriginatorType, o.Originator)) + return marshal.JSON(o.Originator) + } else if o.OriginatorType != "" && o.Originator != "" { + return marshal.JSON(fmt.Sprintf("%s: %s", o.OriginatorType, o.Originator)) } - return []byte{}, nil + return []byte{}, fmt.Errorf("failed to marshal invalid Originator: %+v", o) } type PackageVerificationCode struct { diff --git a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/document.go b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/document.go index 60a27c44d..bd6b41d4a 100644 --- a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/document.go +++ b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_1/document.go @@ -4,7 +4,7 @@ package v2_1 import ( - "github.com/anchore/go-struct-converter" + converter "github.com/anchore/go-struct-converter" "github.com/spdx/tools-golang/spdx/v2/common" ) diff --git a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/document.go b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/document.go index c7dae4423..5fcd33aa4 100644 --- a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/document.go +++ b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/document.go @@ -120,6 +120,14 @@ func (d *Document) UnmarshalJSON(b []byte) error { return fmt.Sprintf("%v-%v->%v", common.RenderDocElementID(refA), rel, common.RenderDocElementID(refB)) } + // remove null relationships + for i := 0; i < len(d.Relationships); i++ { + if d.Relationships[i] == nil { + d.Relationships = append(d.Relationships[0:i], d.Relationships[i+1:]...) 
+ i-- + } + } + // index current list of relationships to ensure no duplication for _, r := range d.Relationships { relationshipExists[serializeRel(r)] = true diff --git a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/package.go b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/package.go index 54de537c3..a41471eda 100644 --- a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/package.go +++ b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_2/package.go @@ -6,6 +6,7 @@ import ( "encoding/json" "strings" + "github.com/spdx/tools-golang/json/marshal" "github.com/spdx/tools-golang/spdx/v2/common" ) @@ -74,7 +75,7 @@ type Package struct { // 7.14: All Licenses Info from Files: SPDX License Expression, "NONE" or "NOASSERTION" // Cardinality: mandatory, one or many if filesAnalyzed is true / omitted; // zero (must be omitted) if filesAnalyzed is false - PackageLicenseInfoFromFiles []string `json:"licenseInfoFromFiles"` + PackageLicenseInfoFromFiles []string `json:"licenseInfoFromFiles,omitempty"` // 7.15: Declared License: SPDX License Expression, "NONE" or "NOASSERTION" // Cardinality: mandatory, one @@ -122,6 +123,32 @@ type Package struct { hasFiles []common.DocElementID } +func (p Package) MarshalJSON() ([]byte, error) { + type pkg Package + p2 := pkg(p) + + data, err := marshal.JSON(p2) + if err != nil { + return nil, err + } + + // remove empty packageVerificationCode entries -- required by SPDX 2.2 but + // omitempty has no effect since it is a non-comparable struct and not a pointer, so we + // manually check to determine if there is a valid value to output and omit the field if not + // see: https://spdx.github.io/spdx-spec/v2.2.2/package-information/#79-package-verification-code-field + if p.PackageVerificationCode.Value == "" && p.PackageVerificationCode.ExcludedFiles == nil { + var values map[string]interface{} + err = json.Unmarshal(data, &values) + if err != nil { + return nil, err + } + delete(values, "packageVerificationCode") + return marshal.JSON(values) + } + + return data, nil +} + func (p *Package) UnmarshalJSON(b []byte) error { type pkg Package type extras struct { @@ -153,6 +180,7 @@ func (p *Package) UnmarshalJSON(b []byte) error { } var _ json.Unmarshaler = (*Package)(nil) +var _ json.Marshaler = (*Package)(nil) // PackageExternalReference is an External Reference to additional info // about a Package, as defined in section 7.21 in version 2.2 of the spec. @@ -199,5 +227,5 @@ func (r *PackageExternalReference) MarshalJSON() ([]byte, error) { rr = ref(*r) rr.Category = strings.ReplaceAll(rr.Category, "-", "_") - return json.Marshal(&rr) + return marshal.JSON(&rr) } diff --git a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/document.go b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/document.go index 7350192ae..62b7619b2 100644 --- a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/document.go +++ b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/document.go @@ -119,6 +119,14 @@ func (d *Document) UnmarshalJSON(b []byte) error { return fmt.Sprintf("%v-%v->%v", common.RenderDocElementID(refA), rel, common.RenderDocElementID(refB)) } + // remove null relationships + for i := 0; i < len(d.Relationships); i++ { + if d.Relationships[i] == nil { + d.Relationships = append(d.Relationships[0:i], d.Relationships[i+1:]...) 
+ i-- + } + } + // index current list of relationships to ensure no duplication for _, r := range d.Relationships { relationshipExists[serializeRel(r)] = true diff --git a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/package.go b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/package.go index 0acadc27b..6488a7e8c 100644 --- a/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/package.go +++ b/src/cmd/linuxkit/vendor/github.com/spdx/tools-golang/spdx/v2/v2_3/package.go @@ -6,6 +6,7 @@ import ( "encoding/json" "strings" + "github.com/spdx/tools-golang/json/marshal" "github.com/spdx/tools-golang/spdx/v2/common" ) @@ -217,5 +218,5 @@ func (r *PackageExternalReference) MarshalJSON() ([]byte, error) { rr.Category = strings.ReplaceAll(rr.Category, "_", "-") - return json.Marshal(&rr) + return marshal.JSON(&rr) } diff --git a/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/buffer.go b/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/buffer.go new file mode 100644 index 000000000..09d94acbd --- /dev/null +++ b/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/buffer.go @@ -0,0 +1,44 @@ +package fsutil + +import ( + "io" +) + +const chunkSize = 32 * 1024 + +type buffer struct { + chunks [][]byte +} + +func (b *buffer) alloc(n int) []byte { + if n > chunkSize { + buf := make([]byte, n) + b.chunks = append(b.chunks, buf) + return buf + } + + if len(b.chunks) != 0 { + lastChunk := b.chunks[len(b.chunks)-1] + l := len(lastChunk) + if l+n <= cap(lastChunk) { + lastChunk = lastChunk[:l+n] + b.chunks[len(b.chunks)-1] = lastChunk + return lastChunk[l : l+n] + } + } + + buf := make([]byte, n, chunkSize) + b.chunks = append(b.chunks, buf) + return buf +} + +func (b *buffer) WriteTo(w io.Writer) (n int64, err error) { + for _, c := range b.chunks { + m, err := w.Write(c) + n += int64(m) + if err != nil { + return n, err + } + } + return n, nil +} diff --git a/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go b/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go index 2dd3f7d05..d278add87 100644 --- a/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go +++ b/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go @@ -20,7 +20,7 @@ func rewriteMetadata(p string, stat *types.Stat) error { // handleTarTypeBlockCharFifo is an OS-specific helper function used by // createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(path string, stat *types.Stat) error { +func handleTarTypeBlockCharFifo(_ string, _ *types.Stat) error { return errors.New("Not implemented on windows") } diff --git a/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl b/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl index ea73bd208..c70ad2b6f 100644 --- a/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl +++ b/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl @@ -60,7 +60,15 @@ target "test-noroot" { output = ["${DESTDIR}/coverage"] } -target "lint" { +group "lint" { + targets = ["lint-golangci", "lint-gopls"] +} + +group "lint-cross" { + targets = ["lint-golangci-cross", "lint-gopls-cross"] +} + +target "lint-golangci" { inherits = ["_common"] dockerfile = "./hack/dockerfiles/lint.Dockerfile" output = ["type=cacheonly"] @@ -69,8 +77,17 @@ target "lint" { } } -target "lint-cross" { - inherits = ["lint", "_platforms"] +target "lint-gopls" { + inherits = ["lint-golangci"] 
+ target = "gopls-analyze" +} + +target "lint-golangci-cross" { + inherits = ["lint-golangci", "_platforms"] +} + +target "lint-gopls-cross" { + inherits = ["lint-gopls", "_platforms"] } target "validate-generated-files" { diff --git a/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/receive.go b/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/receive.go index 2f94778a2..57d903c5d 100644 --- a/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/receive.go +++ b/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/receive.go @@ -32,6 +32,7 @@ package fsutil import ( "context" + "encoding/binary" "io" "os" "path/filepath" @@ -51,6 +52,8 @@ const ( DiffContent ) +const metadataPath = ".fsutil-metadata" + type ReceiveOpt struct { NotifyHashed ChangeFunc ContentHasher ContentHasher @@ -58,6 +61,7 @@ type ReceiveOpt struct { Merge bool Filter FilterFunc Differ DiffType + MetadataOnly FilterFunc } func Receive(ctx context.Context, conn Stream, dest string, opt ReceiveOpt) error { @@ -75,21 +79,23 @@ func Receive(ctx context.Context, conn Stream, dest string, opt ReceiveOpt) erro merge: opt.Merge, filter: opt.Filter, differ: opt.Differ, + metadataOnly: opt.MetadataOnly, } return r.run(ctx) } type receiver struct { - dest string - conn Stream - files map[string]uint32 - pipes map[uint32]io.WriteCloser - mu sync.RWMutex - muPipes sync.RWMutex - progressCb func(int, bool) - merge bool - filter FilterFunc - differ DiffType + dest string + conn Stream + files map[string]uint32 + pipes map[uint32]io.WriteCloser + mu sync.RWMutex + muPipes sync.RWMutex + progressCb func(int, bool) + merge bool + filter FilterFunc + differ DiffType + metadataOnly FilterFunc notifyHashed ChangeFunc contentHasher ContentHasher @@ -164,6 +170,11 @@ func (r *receiver) run(ctx context.Context) error { } w := newDynamicWalker() + metadataTransfer := r.metadataOnly != nil + // buffer Stat metadata in framed proto + metadataBuffer := &buffer{} + // stack of parent paths that can be replayed if metadata filter matches + metadataParents := newStack[*currentPath]() g.Go(func() (retErr error) { defer func() { @@ -223,10 +234,26 @@ func (r *receiver) run(ctx context.Context) error { // e.g. 
a linux path foo/bar\baz cannot be represented on windows return errors.WithStack(&os.PathError{Path: p.Stat.Path, Err: syscall.EINVAL, Op: "unrepresentable path"}) } + var metaOnly bool + if metadataTransfer { + if path == metadataPath { + continue + } + n := p.Stat.SizeVT() + dt := metadataBuffer.alloc(n + 4) + binary.LittleEndian.PutUint32(dt[0:4], uint32(n)) + _, err := p.Stat.MarshalToSizedBufferVT(dt[4:]) + if err != nil { + return err + } + if !r.metadataOnly(path, p.Stat) { + metaOnly = true + } + } p.Stat.Path = path p.Stat.Linkname = filepath.FromSlash(p.Stat.Linkname) - if fileCanRequestData(os.FileMode(p.Stat.Mode)) { + if !metaOnly && fileCanRequestData(os.FileMode(p.Stat.Mode)) { r.mu.Lock() r.files[p.Stat.Path] = i r.mu.Unlock() @@ -240,6 +267,31 @@ func (r *receiver) run(ctx context.Context) error { if err := r.hlValidator.HandleChange(ChangeKindAdd, cp.path, &StatInfo{cp.stat}, nil); err != nil { return err } + if metadataTransfer { + parent := filepath.Dir(cp.path) + isDir := os.FileMode(p.Stat.Mode).IsDir() + for { + last, ok := metadataParents.peek() + if !ok || parent == last.path { + break + } + metadataParents.pop() + } + if isDir { + metadataParents.push(cp) + } + if metaOnly { + continue + } else { + for _, cp := range metadataParents.items { + if err := w.update(cp); err != nil { + return err + } + } + metadataParents.clear() + } + } + if err := w.update(cp); err != nil { return err } @@ -272,7 +324,27 @@ func (r *receiver) run(ctx context.Context) error { } } }) - return g.Wait() + + if err := g.Wait(); err != nil { + return err + } + + if !metadataTransfer { + return nil + } + + // although we don't allow tranferring metadataPath, make sure there was no preexisting file/symlink + os.Remove(filepath.Join(r.dest, metadataPath)) + + f, err := os.OpenFile(filepath.Join(r.dest, metadataPath), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + if _, err := metadataBuffer.WriteTo(f); err != nil { + f.Close() + return err + } + return f.Close() } func (r *receiver) asyncDataFunc(ctx context.Context, p string, wc io.WriteCloser) error { @@ -327,3 +399,39 @@ func (w *wrappedWriteCloser) Wait(ctx context.Context) error { return w.err } } + +type stack[T any] struct { + items []T +} + +func newStack[T any]() *stack[T] { + return &stack[T]{ + items: make([]T, 0, 8), + } +} + +func (s *stack[T]) push(v T) { + s.items = append(s.items, v) +} + +func (s *stack[T]) pop() (T, bool) { + if len(s.items) == 0 { + var zero T + return zero, false + } + v := s.items[len(s.items)-1] + s.items = s.items[:len(s.items)-1] + return v, true +} + +func (s *stack[T]) peek() (T, bool) { + if len(s.items) == 0 { + var zero T + return zero, false + } + return s.items[len(s.items)-1], true +} + +func (s *stack[T]) clear() { + s.items = s.items[:0] +} diff --git a/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/send.go b/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/send.go index e4a315638..31d610081 100644 --- a/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/send.go +++ b/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/send.go @@ -148,7 +148,8 @@ func (s *sender) sendFile(h *sendHandle) error { func (s *sender) walk(ctx context.Context) error { var i uint32 = 0 - err := s.fs.Walk(ctx, "/", func(path string, entry os.DirEntry, err error) error { + target := string(filepath.Separator) + err := s.fs.Walk(ctx, target, func(path string, entry os.DirEntry, err error) error { if err != nil { return err } diff --git 
a/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/stat_unix.go b/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/stat_unix.go index def901e31..94e5da0f9 100644 --- a/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/stat_unix.go +++ b/src/cmd/linuxkit/vendor/github.com/tonistiigi/fsutil/stat_unix.go @@ -5,6 +5,7 @@ package fsutil import ( "os" + "strings" "syscall" "github.com/containerd/continuity/sysx" @@ -12,6 +13,8 @@ import ( "github.com/tonistiigi/fsutil/types" ) +const xattrApplePrefix = "com.apple." + func loadXattr(origpath string, stat *types.Stat) error { xattrs, err := sysx.LListxattr(origpath) if err != nil { @@ -23,16 +26,29 @@ func loadXattr(origpath string, stat *types.Stat) error { if len(xattrs) > 0 { m := make(map[string][]byte) for _, key := range xattrs { - v, err := sysx.LGetxattr(origpath, key) - if err == nil { + if skipXattr(key) { + continue + } + + if v, err := sysx.LGetxattr(origpath, key); err == nil { m[key] = v } } - stat.Xattrs = m + + if len(m) > 0 { + stat.Xattrs = m + } } return nil } +func skipXattr(key string) bool { + if strings.HasPrefix(key, xattrApplePrefix) { + return true + } + return false +} + func setUnixOpt(fi os.FileInfo, stat *types.Stat, path string, seenFiles map[uint64]string) { s := fi.Sys().(*syscall.Stat_t) diff --git a/src/cmd/linuxkit/vendor/github.com/tonistiigi/go-csvvalue/Dockerfile b/src/cmd/linuxkit/vendor/github.com/tonistiigi/go-csvvalue/Dockerfile index fd144d923..19c76ae11 100644 --- a/src/cmd/linuxkit/vendor/github.com/tonistiigi/go-csvvalue/Dockerfile +++ b/src/cmd/linuxkit/vendor/github.com/tonistiigi/go-csvvalue/Dockerfile @@ -1,7 +1,7 @@ #syntax=docker/dockerfile:1.8 #check=error=true -ARG GO_VERSION=1.22 +ARG GO_VERSION=1.23 ARG XX_VERSION=1.4.0 ARG COVER_FILENAME="cover.out" diff --git a/src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/format.go b/src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/format.go index 1f89d0c59..60977980c 100644 --- a/src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/format.go +++ b/src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/format.go @@ -143,6 +143,10 @@ const ( blockSize = 512 // Size of each block in a tar stream nameSize = 100 // Max length of the name field in USTAR format prefixSize = 155 // Max length of the prefix field in USTAR format + + // Max length of a special file (PAX header, GNU long name or link). + // This matches the limit used by libarchive. + maxSpecialFileSize = 1 << 20 ) // blockPadding computes the number of bytes needed to pad offset up to the diff --git a/src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/reader.go index 6a6b3e018..248a7ccb1 100644 --- a/src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/reader.go +++ b/src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/reader.go @@ -144,7 +144,7 @@ func (tr *Reader) next() (*Header, error) { continue // This is a meta header affecting the next header case TypeGNULongName, TypeGNULongLink: format.mayOnlyBe(FormatGNU) - realname, err := io.ReadAll(tr) + realname, err := readSpecialFile(tr) if err != nil { return nil, err } @@ -338,7 +338,7 @@ func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) { // parsePAX parses PAX headers. 
// If an extended header (type 'x') is invalid, ErrHeader is returned func parsePAX(r io.Reader) (map[string]string, error) { - buf, err := io.ReadAll(r) + buf, err := readSpecialFile(r) if err != nil { return nil, err } @@ -889,6 +889,16 @@ func tryReadFull(r io.Reader, b []byte) (n int, err error) { return n, err } +// readSpecialFile is like io.ReadAll except it returns +// ErrFieldTooLong if more than maxSpecialFileSize is read. +func readSpecialFile(r io.Reader) ([]byte, error) { + buf, err := io.ReadAll(io.LimitReader(r, maxSpecialFileSize+1)) + if len(buf) > maxSpecialFileSize { + return nil, ErrFieldTooLong + } + return buf, err +} + // discard skips n bytes in r, reporting an error if unable to do so. func discard(tr *Reader, n int64) error { var seekSkipped, copySkipped int64 diff --git a/src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/writer.go b/src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/writer.go index e80498d03..893eac00a 100644 --- a/src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/writer.go +++ b/src/cmd/linuxkit/vendor/github.com/vbatts/tar-split/archive/tar/writer.go @@ -199,6 +199,9 @@ func (tw *Writer) writePAXHeader(hdr *Header, paxHdrs map[string]string) error { flag = TypeXHeader } data := buf.String() + if len(data) > maxSpecialFileSize { + return ErrFieldTooLong + } if err := tw.writeRawFile(name, data, flag, FormatPAX); err != nil || isGlobal { return err // Global headers return here } diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go index 18436eaed..9e87fb4bb 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go @@ -51,11 +51,11 @@ type config struct { tracer trace.Tracer meter metric.Meter - rpcDuration metric.Float64Histogram - rpcRequestSize metric.Int64Histogram - rpcResponseSize metric.Int64Histogram - rpcRequestsPerRPC metric.Int64Histogram - rpcResponsesPerRPC metric.Int64Histogram + rpcDuration metric.Float64Histogram + rpcInBytes metric.Int64Histogram + rpcOutBytes metric.Int64Histogram + rpcInMessages metric.Int64Histogram + rpcOutMessages metric.Int64Histogram } // Option applies an option value for a config. 
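[Note, not part of the vendored patch: the otelgrpc hunk above and the one below replace the request/response-named histogram fields with direction-named ones (rpcInBytes/rpcOutBytes, rpcInMessages/rpcOutMessages) and pick the backing instrument per role, so the exported metric names ("rpc."+role+".request.size", ".response.size", ".requests_per_rpc", ".responses_per_rpc") are unchanged; the observable difference is on the client side, where a received payload is now recorded against rpc.client.response.size rather than rpc.client.request.size (and likewise for the per-RPC message counts). A minimal wiring sketch under the usual stats-handler setup — otelgrpc.NewClientHandler appears in this diff, while NewServerHandler, grpc.NewClient and the insecure credentials are assumed from the standard grpc-go/otelgrpc APIs:]

package grpcwiring

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
)

// newInstrumentedServer attaches the server-side stats handler: a received payload
// (InPayload) feeds rpc.server.request.size / requests_per_rpc and a sent payload
// (OutPayload) feeds rpc.server.response.size / responses_per_rpc ("server" case of
// the role switch in the hunk below).
func newInstrumentedServer() *grpc.Server {
	return grpc.NewServer(grpc.StatsHandler(otelgrpc.NewServerHandler()))
}

// dialInstrumented attaches the client-side stats handler: after this change a
// received payload feeds rpc.client.response.size / responses_per_rpc and a sent
// payload feeds rpc.client.request.size / requests_per_rpc ("client" case).
func dialInstrumented(target string) (*grpc.ClientConn, error) {
	return grpc.NewClient(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(otelgrpc.NewClientHandler()),
	)
}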
@@ -96,46 +96,64 @@ func newConfig(opts []Option, role string) *config { } } - c.rpcRequestSize, err = c.meter.Int64Histogram("rpc."+role+".request.size", + rpcRequestSize, err := c.meter.Int64Histogram("rpc."+role+".request.size", metric.WithDescription("Measures size of RPC request messages (uncompressed)."), metric.WithUnit("By")) if err != nil { otel.Handle(err) - if c.rpcRequestSize == nil { - c.rpcRequestSize = noop.Int64Histogram{} + if rpcRequestSize == nil { + rpcRequestSize = noop.Int64Histogram{} } } - c.rpcResponseSize, err = c.meter.Int64Histogram("rpc."+role+".response.size", + rpcResponseSize, err := c.meter.Int64Histogram("rpc."+role+".response.size", metric.WithDescription("Measures size of RPC response messages (uncompressed)."), metric.WithUnit("By")) if err != nil { otel.Handle(err) - if c.rpcResponseSize == nil { - c.rpcResponseSize = noop.Int64Histogram{} + if rpcResponseSize == nil { + rpcResponseSize = noop.Int64Histogram{} } } - c.rpcRequestsPerRPC, err = c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", + rpcRequestsPerRPC, err := c.meter.Int64Histogram("rpc."+role+".requests_per_rpc", metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), metric.WithUnit("{count}")) if err != nil { otel.Handle(err) - if c.rpcRequestsPerRPC == nil { - c.rpcRequestsPerRPC = noop.Int64Histogram{} + if rpcRequestsPerRPC == nil { + rpcRequestsPerRPC = noop.Int64Histogram{} } } - c.rpcResponsesPerRPC, err = c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", + rpcResponsesPerRPC, err := c.meter.Int64Histogram("rpc."+role+".responses_per_rpc", metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."), metric.WithUnit("{count}")) if err != nil { otel.Handle(err) - if c.rpcResponsesPerRPC == nil { - c.rpcResponsesPerRPC = noop.Int64Histogram{} + if rpcResponsesPerRPC == nil { + rpcResponsesPerRPC = noop.Int64Histogram{} } } + switch role { + case "client": + c.rpcInBytes = rpcResponseSize + c.rpcInMessages = rpcResponsesPerRPC + c.rpcOutBytes = rpcRequestSize + c.rpcOutMessages = rpcRequestsPerRPC + case "server": + c.rpcInBytes = rpcRequestSize + c.rpcInMessages = rpcRequestsPerRPC + c.rpcOutBytes = rpcResponseSize + c.rpcOutMessages = rpcResponsesPerRPC + default: + c.rpcInBytes = noop.Int64Histogram{} + c.rpcInMessages = noop.Int64Histogram{} + c.rpcOutBytes = noop.Int64Histogram{} + c.rpcOutMessages = noop.Int64Histogram{} + } + return c } diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go index fbcbfb84e..216127d6f 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go @@ -13,21 +13,22 @@ import ( "google.golang.org/grpc/stats" "google.golang.org/grpc/status" - "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/metric" semconv "go.opentelemetry.io/otel/semconv/v1.17.0" "go.opentelemetry.io/otel/trace" + + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" ) type gRPCContextKey struct{} type gRPCContext 
struct { - messagesReceived int64 - messagesSent int64 - metricAttrs []attribute.KeyValue - record bool + inMessages int64 + outMessages int64 + metricAttrs []attribute.KeyValue + record bool } type serverHandler struct { @@ -58,20 +59,26 @@ func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) cont name, attrs := internal.ParseFullMethod(info.FullMethodName) attrs = append(attrs, RPCSystemGRPC) - ctx, _ = h.tracer.Start( - trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), - name, - trace.WithSpanKind(trace.SpanKindServer), - trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), - ) + + record := true + if h.config.Filter != nil { + record = h.config.Filter(info) + } + + if record { + ctx, _ = h.tracer.Start( + trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)), + name, + trace.WithSpanKind(trace.SpanKindServer), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), + ) + } gctx := gRPCContext{ metricAttrs: append(attrs, h.config.MetricAttributes...), - record: true, - } - if h.config.Filter != nil { - gctx.record = h.config.Filter(info) + record: record, } + return context.WithValue(ctx, gRPCContextKey{}, &gctx) } @@ -98,19 +105,24 @@ func NewClientHandler(opts ...Option) stats.Handler { func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context { name, attrs := internal.ParseFullMethod(info.FullMethodName) attrs = append(attrs, RPCSystemGRPC) - ctx, _ = h.tracer.Start( - ctx, - name, - trace.WithSpanKind(trace.SpanKindClient), - trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), - ) + + record := true + if h.config.Filter != nil { + record = h.config.Filter(info) + } + + if record { + ctx, _ = h.tracer.Start( + ctx, + name, + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...), + ) + } gctx := gRPCContext{ metricAttrs: append(attrs, h.config.MetricAttributes...), - record: true, - } - if h.config.Filter != nil { - gctx.record = h.config.Filter(info) + record: record, } return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators) @@ -150,8 +162,8 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool case *stats.Begin: case *stats.InPayload: if gctx != nil { - messageId = atomic.AddInt64(&gctx.messagesReceived, 1) - c.rpcRequestSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) + messageId = atomic.AddInt64(&gctx.inMessages, 1) + c.rpcInBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.ReceivedEvent { @@ -166,8 +178,8 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool } case *stats.OutPayload: if gctx != nil { - messageId = atomic.AddInt64(&gctx.messagesSent, 1) - c.rpcResponseSize.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) + messageId = atomic.AddInt64(&gctx.outMessages, 1) + c.rpcOutBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...))) } if c.SentEvent { @@ -213,8 +225,8 @@ func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool c.rpcDuration.Record(ctx, elapsedTime, recordOpts...) if gctx != nil { - c.rpcRequestsPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesReceived), recordOpts...) - c.rpcResponsesPerRPC.Record(ctx, atomic.LoadInt64(&gctx.messagesSent), recordOpts...) 
+ c.rpcInMessages.Record(ctx, atomic.LoadInt64(&gctx.inMessages), recordOpts...) + c.rpcOutMessages.Record(ctx, atomic.LoadInt64(&gctx.outMessages), recordOpts...) } default: return diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go index 439251e13..442e8a838 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -5,7 +5,7 @@ package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.g // Version is the current release version of the gRPC instrumentation. func Version() string { - return "0.56.0" + return "0.60.0" // This string is updated by the pre_release.sh script during release } diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go index e555a475f..3ea05d019 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go @@ -12,6 +12,7 @@ import ( "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/propagation" "go.opentelemetry.io/otel/trace" ) @@ -21,15 +22,16 @@ type middleware struct { operation string server string - tracer trace.Tracer - propagators propagation.TextMapPropagator - spanStartOptions []trace.SpanStartOption - readEvent bool - writeEvent bool - filters []Filter - spanNameFormatter func(string, *http.Request) string - publicEndpoint bool - publicEndpointFn func(*http.Request) bool + tracer trace.Tracer + propagators propagation.TextMapPropagator + spanStartOptions []trace.SpanStartOption + readEvent bool + writeEvent bool + filters []Filter + spanNameFormatter func(string, *http.Request) string + publicEndpoint bool + publicEndpointFn func(*http.Request) bool + metricAttributesFn func(*http.Request) []attribute.KeyValue semconv semconv.HTTPServer } @@ -79,6 +81,7 @@ func (h *middleware) configure(c *config) { h.publicEndpointFn = c.PublicEndpointFn h.server = c.ServerName h.semconv = semconv.NewHTTPServer(c.Meter) + h.metricAttributesFn = c.MetricAttributesFn } // serveHTTP sets up tracing and calls the given next http.Handler with the span @@ -189,14 +192,16 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http // Use floating point division here for higher precision (instead of Millisecond method). 
elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond) + metricAttributes := semconv.MetricAttributes{ + Req: r, + StatusCode: statusCode, + AdditionalAttributes: append(labeler.Get(), h.metricAttributesFromRequest(r)...), + } + h.semconv.RecordMetrics(ctx, semconv.ServerMetricData{ - ServerName: h.server, - ResponseSize: bytesWritten, - MetricAttributes: semconv.MetricAttributes{ - Req: r, - StatusCode: statusCode, - AdditionalAttributes: labeler.Get(), - }, + ServerName: h.server, + ResponseSize: bytesWritten, + MetricAttributes: metricAttributes, MetricData: semconv.MetricData{ RequestSize: bw.BytesRead(), ElapsedTime: elapsedTime, @@ -204,6 +209,14 @@ func (h *middleware) serveHTTP(w http.ResponseWriter, r *http.Request, next http }) } +func (h *middleware) metricAttributesFromRequest(r *http.Request) []attribute.KeyValue { + var attributeForRequest []attribute.KeyValue + if h.metricAttributesFn != nil { + attributeForRequest = h.metricAttributesFn(r) + } + return attributeForRequest +} + // WithRouteTag annotates spans and metrics with the provided route name // with HTTP route attribute. func WithRouteTag(route string, h http.Handler) http.Handler { diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go index a945f5566..866aa21dc 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/body_wrapper.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. 
+// source: internal/shared/request/body_wrapper.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go new file mode 100644 index 000000000..9e00dd2fc --- /dev/null +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/gen.go @@ -0,0 +1,10 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package request // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request" + +// Generate request package: +//go:generate gotmpl --body=../../../../../../internal/shared/request/body_wrapper.go.tmpl "--data={}" --out=body_wrapper.go +//go:generate gotmpl --body=../../../../../../internal/shared/request/body_wrapper_test.go.tmpl "--data={}" --out=body_wrapper_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/request/resp_writer_wrapper.go.tmpl "--data={}" --out=resp_writer_wrapper.go +//go:generate gotmpl --body=../../../../../../internal/shared/request/resp_writer_wrapper_test.go.tmpl "--data={}" --out=resp_writer_wrapper_test.go diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go index fbc344cbd..73184e7d0 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request/resp_writer_wrapper.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/request/resp_writer_wrapper.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go index 3b036f8a3..4693a0194 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/env.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/env.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -16,6 +19,10 @@ import ( "go.opentelemetry.io/otel/metric" ) +// OTelSemConvStabilityOptIn is an environment variable. +// That can be set to "old" or "http/dup" to opt into the new HTTP semantic conventions. 
+const OTelSemConvStabilityOptIn = "OTEL_SEMCONV_STABILITY_OPT_IN" + type ResponseTelemetry struct { StatusCode int ReadBytes int64 @@ -31,6 +38,11 @@ type HTTPServer struct { requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter serverLatencyMeasure metric.Float64Histogram + + // New metrics + requestBodySizeHistogram metric.Int64Histogram + responseBodySizeHistogram metric.Int64Histogram + requestDurationHistogram metric.Float64Histogram } // RequestTraceAttrs returns trace attributes for an HTTP request received by a @@ -56,6 +68,15 @@ func (s HTTPServer) RequestTraceAttrs(server string, req *http.Request) []attrib return OldHTTPServer{}.RequestTraceAttrs(server, req) } +func (s HTTPServer) NetworkTransportAttr(network string) []attribute.KeyValue { + if s.duplicate { + return append([]attribute.KeyValue{OldHTTPServer{}.NetworkTransportAttr(network)}, CurrentHTTPServer{}.NetworkTransportAttr(network)) + } + return []attribute.KeyValue{ + OldHTTPServer{}.NetworkTransportAttr(network), + } +} + // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. @@ -103,38 +124,56 @@ type MetricData struct { ElapsedTime float64 } -var metricAddOptionPool = &sync.Pool{ - New: func() interface{} { - return &[]metric.AddOption{} - }, -} - -func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { - if s.requestBytesCounter == nil || s.responseBytesCounter == nil || s.serverLatencyMeasure == nil { - // This will happen if an HTTPServer{} is used instead of NewHTTPServer. - return +var ( + metricAddOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.AddOption{} + }, } - attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) - o := metric.WithAttributeSet(attribute.NewSet(attributes...)) - addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) - *addOpts = append(*addOpts, o) - s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...) - s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...) - s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) - *addOpts = (*addOpts)[:0] - metricAddOptionPool.Put(addOpts) + metricRecordOptionPool = &sync.Pool{ + New: func() interface{} { + return &[]metric.RecordOption{} + }, + } +) - // TODO: Duplicate Metrics +func (s HTTPServer) RecordMetrics(ctx context.Context, md ServerMetricData) { + if s.requestBytesCounter != nil && s.responseBytesCounter != nil && s.serverLatencyMeasure != nil { + attributes := OldHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + addOpts := metricAddOptionPool.Get().(*[]metric.AddOption) + *addOpts = append(*addOpts, o) + s.requestBytesCounter.Add(ctx, md.RequestSize, *addOpts...) + s.responseBytesCounter.Add(ctx, md.ResponseSize, *addOpts...) 
+ s.serverLatencyMeasure.Record(ctx, md.ElapsedTime, o) + *addOpts = (*addOpts)[:0] + metricAddOptionPool.Put(addOpts) + } + + if s.duplicate && s.requestDurationHistogram != nil && s.requestBodySizeHistogram != nil && s.responseBodySizeHistogram != nil { + attributes := CurrentHTTPServer{}.MetricAttributes(md.ServerName, md.Req, md.StatusCode, md.AdditionalAttributes) + o := metric.WithAttributeSet(attribute.NewSet(attributes...)) + recordOpts := metricRecordOptionPool.Get().(*[]metric.RecordOption) + *recordOpts = append(*recordOpts, o) + s.requestBodySizeHistogram.Record(ctx, md.RequestSize, *recordOpts...) + s.responseBodySizeHistogram.Record(ctx, md.ResponseSize, *recordOpts...) + s.requestDurationHistogram.Record(ctx, md.ElapsedTime, o) + *recordOpts = (*recordOpts)[:0] + metricRecordOptionPool.Put(recordOpts) + } } func NewHTTPServer(meter metric.Meter) HTTPServer { - env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) duplicate := env == "http/dup" server := HTTPServer{ duplicate: duplicate, } server.requestBytesCounter, server.responseBytesCounter, server.serverLatencyMeasure = OldHTTPServer{}.createMeasures(meter) + if duplicate { + server.requestBodySizeHistogram, server.responseBodySizeHistogram, server.requestDurationHistogram = CurrentHTTPServer{}.createMeasures(meter) + } return server } @@ -145,14 +184,23 @@ type HTTPClient struct { requestBytesCounter metric.Int64Counter responseBytesCounter metric.Int64Counter latencyMeasure metric.Float64Histogram + + // new metrics + requestBodySize metric.Int64Histogram + requestDuration metric.Float64Histogram } func NewHTTPClient(meter metric.Meter) HTTPClient { - env := strings.ToLower(os.Getenv("OTEL_SEMCONV_STABILITY_OPT_IN")) + env := strings.ToLower(os.Getenv(OTelSemConvStabilityOptIn)) + duplicate := env == "http/dup" client := HTTPClient{ - duplicate: env == "http/dup", + duplicate: duplicate, } client.requestBytesCounter, client.responseBytesCounter, client.latencyMeasure = OldHTTPClient{}.createMeasures(meter) + if duplicate { + client.requestBodySize, client.requestDuration = CurrentHTTPClient{}.createMeasures(meter) + } + return client } @@ -204,34 +252,56 @@ func (o MetricOpts) AddOptions() metric.AddOption { return o.addOptions } -func (c HTTPClient) MetricOptions(ma MetricAttributes) MetricOpts { +func (c HTTPClient) MetricOptions(ma MetricAttributes) map[string]MetricOpts { + opts := map[string]MetricOpts{} + attributes := OldHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) - // TODO: Duplicate Metrics set := metric.WithAttributeSet(attribute.NewSet(attributes...)) - return MetricOpts{ + opts["old"] = MetricOpts{ measurement: set, addOptions: set, } + + if c.duplicate { + attributes := CurrentHTTPClient{}.MetricAttributes(ma.Req, ma.StatusCode, ma.AdditionalAttributes) + set := metric.WithAttributeSet(attribute.NewSet(attributes...)) + opts["new"] = MetricOpts{ + measurement: set, + addOptions: set, + } + } + + return opts } -func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts MetricOpts) { +func (s HTTPClient) RecordMetrics(ctx context.Context, md MetricData, opts map[string]MetricOpts) { if s.requestBytesCounter == nil || s.latencyMeasure == nil { // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). 
return } - s.requestBytesCounter.Add(ctx, md.RequestSize, opts.AddOptions()) - s.latencyMeasure.Record(ctx, md.ElapsedTime, opts.MeasurementOption()) + s.requestBytesCounter.Add(ctx, md.RequestSize, opts["old"].AddOptions()) + s.latencyMeasure.Record(ctx, md.ElapsedTime, opts["old"].MeasurementOption()) - // TODO: Duplicate Metrics + if s.duplicate { + s.requestBodySize.Record(ctx, md.RequestSize, opts["new"].MeasurementOption()) + s.requestDuration.Record(ctx, md.ElapsedTime, opts["new"].MeasurementOption()) + } } -func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts metric.AddOption) { +func (s HTTPClient) RecordResponseSize(ctx context.Context, responseData int64, opts map[string]MetricOpts) { if s.responseBytesCounter == nil { // This will happen if an HTTPClient{} is used instead of NewHTTPClient(). return } - s.responseBytesCounter.Add(ctx, responseData, opts) - // TODO: Duplicate Metrics + s.responseBytesCounter.Add(ctx, responseData, opts["old"].AddOptions()) +} + +func (s HTTPClient) TraceAttributes(host string) []attribute.KeyValue { + if s.duplicate { + return append(OldHTTPClient{}.TraceAttributes(host), CurrentHTTPClient{}.TraceAttributes(host)...) + } + + return OldHTTPClient{}.TraceAttributes(host) } diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go new file mode 100644 index 000000000..f2cf8a152 --- /dev/null +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/gen.go @@ -0,0 +1,14 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package semconv // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv" + +// Generate semconv package: +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/bench_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=bench_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/env_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=env_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/httpconv_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=httpconv_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/util_test.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=util_test.go +//go:generate gotmpl --body=../../../../../../internal/shared/semconv/v1.20.0.go.tmpl "--data={ \"pkg\": \"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp\" }" --out=v1.20.0.go diff --git 
a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go index dc9ec7bc3..8b85eff90 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/httpconv.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/httpconv.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -7,10 +10,13 @@ import ( "fmt" "net/http" "reflect" + "slices" "strconv" "strings" "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/noop" semconvNew "go.opentelemetry.io/otel/semconv/v1.26.0" ) @@ -135,6 +141,19 @@ func (n CurrentHTTPServer) RequestTraceAttrs(server string, req *http.Request) [ return attrs } +func (o CurrentHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue { + switch network { + case "tcp", "tcp4", "tcp6": + return semconvNew.NetworkTransportTCP + case "udp", "udp4", "udp6": + return semconvNew.NetworkTransportUDP + case "unix", "unixgram", "unixpacket": + return semconvNew.NetworkTransportUnix + default: + return semconvNew.NetworkTransportPipe + } +} + func (n CurrentHTTPServer) method(method string) (attribute.KeyValue, attribute.KeyValue) { if method == "" { return semconvNew.HTTPRequestMethodGet, attribute.KeyValue{} @@ -199,6 +218,86 @@ func (n CurrentHTTPServer) Route(route string) attribute.KeyValue { return semconvNew.HTTPRoute(route) } +func (n CurrentHTTPServer) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Int64Histogram, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Histogram{}, noop.Int64Histogram{}, noop.Float64Histogram{} + } + + var err error + requestBodySizeHistogram, err := meter.Int64Histogram( + semconvNew.HTTPServerRequestBodySizeName, + metric.WithUnit(semconvNew.HTTPServerRequestBodySizeUnit), + metric.WithDescription(semconvNew.HTTPServerRequestBodySizeDescription), + ) + handleErr(err) + + responseBodySizeHistogram, err := meter.Int64Histogram( + semconvNew.HTTPServerResponseBodySizeName, + metric.WithUnit(semconvNew.HTTPServerResponseBodySizeUnit), + metric.WithDescription(semconvNew.HTTPServerResponseBodySizeDescription), + ) + handleErr(err) + requestDurationHistogram, err := meter.Float64Histogram( + semconvNew.HTTPServerRequestDurationName, + metric.WithUnit(semconvNew.HTTPServerRequestDurationUnit), + metric.WithDescription(semconvNew.HTTPServerRequestDurationDescription), + ) + handleErr(err) + + return requestBodySizeHistogram, responseBodySizeHistogram, requestDurationHistogram +} + +func (n CurrentHTTPServer) MetricAttributes(server string, req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 3 + var host string + var p int + if server == "" { + host, p = SplitHostPort(req.Host) + } else { + // Prioritize the primary server name. 
+ host, p = SplitHostPort(server) + if p < 0 { + _, p = SplitHostPort(req.Host) + } + } + hostPort := requiredHTTPPort(req.TLS != nil, p) + if hostPort > 0 { + num++ + } + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + n.scheme(req.TLS != nil), + semconvNew.ServerAddress(host)) + + if hostPort > 0 { + attributes = append(attributes, semconvNew.ServerPort(hostPort)) + } + if protoName != "" { + attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) + } + return attributes +} + type CurrentHTTPClient struct{} // RequestTraceAttrs returns trace attributes for an HTTP request made by a client. @@ -343,6 +442,98 @@ func (n CurrentHTTPClient) method(method string) (attribute.KeyValue, attribute. return semconvNew.HTTPRequestMethodGet, orig } +func (n CurrentHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Histogram, metric.Float64Histogram) { + if meter == nil { + return noop.Int64Histogram{}, noop.Float64Histogram{} + } + + var err error + requestBodySize, err := meter.Int64Histogram( + semconvNew.HTTPClientRequestBodySizeName, + metric.WithUnit(semconvNew.HTTPClientRequestBodySizeUnit), + metric.WithDescription(semconvNew.HTTPClientRequestBodySizeDescription), + ) + handleErr(err) + + requestDuration, err := meter.Float64Histogram( + semconvNew.HTTPClientRequestDurationName, + metric.WithUnit(semconvNew.HTTPClientRequestDurationUnit), + metric.WithDescription(semconvNew.HTTPClientRequestDurationDescription), + ) + handleErr(err) + + return requestBodySize, requestDuration +} + +func (n CurrentHTTPClient) MetricAttributes(req *http.Request, statusCode int, additionalAttributes []attribute.KeyValue) []attribute.KeyValue { + num := len(additionalAttributes) + 2 + var h string + if req.URL != nil { + h = req.URL.Host + } + var requestHost string + var requestPort int + for _, hostport := range []string{h, req.Header.Get("Host")} { + requestHost, requestPort = SplitHostPort(hostport) + if requestHost != "" || requestPort > 0 { + break + } + } + + port := requiredHTTPPort(req.URL != nil && req.URL.Scheme == "https", requestPort) + if port > 0 { + num++ + } + + protoName, protoVersion := netProtocol(req.Proto) + if protoName != "" { + num++ + } + if protoVersion != "" { + num++ + } + + if statusCode > 0 { + num++ + } + + attributes := slices.Grow(additionalAttributes, num) + attributes = append(attributes, + semconvNew.HTTPRequestMethodKey.String(standardizeHTTPMethod(req.Method)), + semconvNew.ServerAddress(requestHost), + n.scheme(req.TLS != nil), + ) + + if port > 0 { + attributes = append(attributes, semconvNew.ServerPort(port)) + } + if protoName != "" { + attributes = append(attributes, semconvNew.NetworkProtocolName(protoName)) + } + if protoVersion != "" { + attributes = append(attributes, semconvNew.NetworkProtocolVersion(protoVersion)) + } + + if statusCode > 0 { + attributes = append(attributes, semconvNew.HTTPResponseStatusCode(statusCode)) + } + return attributes +} + +// Attributes for httptrace. 
+func (n CurrentHTTPClient) TraceAttributes(host string) []attribute.KeyValue { + return []attribute.KeyValue{ + semconvNew.ServerAddress(host), + } +} + +func (n CurrentHTTPClient) scheme(https bool) attribute.KeyValue { // nolint:revive + if https { + return semconvNew.URLScheme("https") + } + return semconvNew.URLScheme("http") +} + func isErrorStatusCode(code int) bool { return code >= 400 || code < 100 } diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go index 93e8d0f94..315d3dd29 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/util.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/util.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -75,7 +78,16 @@ func serverClientIP(xForwardedFor string) string { func netProtocol(proto string) (name string, version string) { name, version, _ = strings.Cut(proto, "/") - name = strings.ToLower(name) + switch name { + case "HTTP": + name = "http" + case "QUIC": + name = "quic" + case "SPDY": + name = "spdy" + default: + name = strings.ToLower(name) + } return name, version } @@ -96,3 +108,13 @@ func handleErr(err error) { otel.Handle(err) } } + +func standardizeHTTPMethod(method string) string { + method = strings.ToUpper(method) + switch method { + case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: + default: + method = "_OTHER" + } + return method +} diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go index c042249dd..742c2113e 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv/v1.20.0.go @@ -1,3 +1,6 @@ +// Code created by gotmpl. DO NOT MODIFY. +// source: internal/shared/semconv/v120.0.go.tmpl + // Copyright The OpenTelemetry Authors // SPDX-License-Identifier: Apache-2.0 @@ -8,7 +11,6 @@ import ( "io" "net/http" "slices" - "strings" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil" "go.opentelemetry.io/otel/attribute" @@ -39,6 +41,10 @@ func (o OldHTTPServer) RequestTraceAttrs(server string, req *http.Request) []att return semconvutil.HTTPServerRequest(server, req) } +func (o OldHTTPServer) NetworkTransportAttr(network string) attribute.KeyValue { + return semconvutil.NetTransport(network) +} + // ResponseTraceAttrs returns trace attributes for telemetry from an HTTP response. // // If any of the fields in the ResponseTelemetry are not set the attribute will be omitted. 
@@ -144,7 +150,7 @@ func (o OldHTTPServer) MetricAttributes(server string, req *http.Request, status attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - standardizeHTTPMethodMetric(req.Method), + semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), o.scheme(req.TLS != nil), semconv.NetHostName(host)) @@ -214,7 +220,7 @@ func (o OldHTTPClient) MetricAttributes(req *http.Request, statusCode int, addit attributes := slices.Grow(additionalAttributes, n) attributes = append(attributes, - standardizeHTTPMethodMetric(req.Method), + semconv.HTTPMethod(standardizeHTTPMethod(req.Method)), semconv.NetPeerName(requestHost), ) @@ -263,12 +269,9 @@ func (o OldHTTPClient) createMeasures(meter metric.Meter) (metric.Int64Counter, return requestBytesCounter, responseBytesCounter, latencyMeasure } -func standardizeHTTPMethodMetric(method string) attribute.KeyValue { - method = strings.ToUpper(method) - switch method { - case http.MethodConnect, http.MethodDelete, http.MethodGet, http.MethodHead, http.MethodOptions, http.MethodPatch, http.MethodPost, http.MethodPut, http.MethodTrace: - default: - method = "_OTHER" +// Attributes for httptrace. +func (c OldHTTPClient) TraceAttributes(host string) []attribute.KeyValue { + return []attribute.KeyValue{ + semconv.NetHostName(host), } - return semconv.HTTPMethod(method) } diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go index b80a1db61..de74fa252 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil/netconv.go @@ -200,6 +200,15 @@ func splitHostPort(hostport string) (host string, port int) { func netProtocol(proto string) (name string, version string) { name, version, _ = strings.Cut(proto, "/") - name = strings.ToLower(name) + switch name { + case "HTTP": + name = "http" + case "QUIC": + name = "quic" + case "SPDY": + name = "spdy" + default: + name = strings.ToLower(name) + } return name, version } diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go index 39681ad4b..44b86ad86 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go @@ -153,7 +153,7 @@ func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) { // For handling response bytes we leverage a callback when the client reads the http response readRecordFunc := func(n int64) { - t.semconv.RecordResponseSize(ctx, n, metricOpts.AddOptions()) + t.semconv.RecordResponseSize(ctx, n, metricOpts) } // traces diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go index 353e43b91..1ec9a00c7 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go @@ -5,7 +5,7 @@ 
package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http // Version is the current release version of the otelhttp instrumentation. func Version() string { - return "0.58.0" + return "0.60.0" // This string is updated by the pre_release.sh script during release } diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/.gitignore b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/.gitignore index ae8577ef3..749e8e881 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/.gitignore +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/.gitignore @@ -1,6 +1,7 @@ .DS_Store Thumbs.db +.cache/ .tools/ venv/ .idea/ diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/.golangci.yml b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/.golangci.yml index ce3f40b60..c58e48ab0 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/.golangci.yml +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/.golangci.yml @@ -25,13 +25,13 @@ linters: - perfsprint - revive - staticcheck - - tenv - testifylint - typecheck - unconvert - unused - unparam - usestdlibvars + - usetesting issues: # Maximum issues count per one linter. @@ -175,132 +175,60 @@ linters-settings: # This means that linting errors with less than 0.8 confidence will be ignored. # Default: 0.8 confidence: 0.01 + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md rules: - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#blank-imports - name: blank-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#bool-literal-in-expr - name: bool-literal-in-expr - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#constant-logical-expr - name: constant-logical-expr - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-as-argument - # TODO (#3372) re-enable linter when it is compatible. 
https://github.com/golangci/golangci-lint/issues/3280 - name: context-as-argument disabled: true arguments: - allowTypesBefore: "*testing.T" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#context-keys-type + - allowTypesBefore: "*testing.T" - name: context-keys-type - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#deep-exit - name: deep-exit - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#defer - name: defer - disabled: false arguments: - ["call-chain", "loop"] - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#dot-imports - name: dot-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#duplicated-imports - name: duplicated-imports - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#early-return - name: early-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-block + arguments: + - "preserveScope" - name: empty-block - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#empty-lines - name: empty-lines - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-naming - name: error-naming - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-return - name: error-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#error-strings - name: error-strings - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#errorf - name: errorf - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#exported - name: exported - disabled: false arguments: - "sayRepetitiveInsteadOfStutters" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#flag-parameter - name: flag-parameter - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#identical-branches - name: identical-branches - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#if-return - name: if-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#increment-decrement - - name: increment-decrement - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#indent-error-flow - - name: indent-error-flow - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#import-shadowing - name: import-shadowing - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#package-comments + - name: increment-decrement + - name: indent-error-flow + arguments: + - "preserveScope" - name: package-comments - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range - name: range - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-in-closure - name: range-val-in-closure - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#range-val-address - name: range-val-address - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#redefines-builtin-id - name: redefines-builtin-id - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#string-format - name: string-format - disabled: false arguments: 
- - panic - '/^[^\n]*$/' - must not contain line breaks - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#struct-tag - name: struct-tag - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#superfluous-else - name: superfluous-else - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#time-equal - - name: time-equal - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-naming - - name: var-naming - disabled: false arguments: - - ["ID"] # AllowList - - ["Otel", "Aws", "Gcp"] # DenyList - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#var-declaration - - name: var-declaration - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unconditional-recursion + - "preserveScope" + - name: time-equal - name: unconditional-recursion - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unexported-return - name: unexported-return - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unhandled-error - name: unhandled-error - disabled: false arguments: - "fmt.Fprint" - "fmt.Fprintf" @@ -308,15 +236,14 @@ linters-settings: - "fmt.Print" - "fmt.Printf" - "fmt.Println" - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unnecessary-stmt - name: unnecessary-stmt - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#useless-break - name: useless-break - disabled: false - # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#waitgroup-by-value + - name: var-declaration + - name: var-naming + arguments: + - ["ID"] # AllowList + - ["Otel", "Aws", "Gcp"] # DenyList - name: waitgroup-by-value - disabled: false testifylint: enable-all: true disable: diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/CHANGELOG.md index a30988f25..c076db282 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/CHANGELOG.md +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/CHANGELOG.md @@ -8,6 +8,61 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] + + + +## [1.35.0/0.57.0/0.11.0] 2025-03-05 + +This release is the last to support [Go 1.22]. +The next release will require at least [Go 1.23]. + +### Added + +- Add `ValueFromAttribute` and `KeyValueFromAttribute` in `go.opentelemetry.io/otel/log`. (#6180) +- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/log`. (#6187) +- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/log/logtest`. (#6187) +- `AssertRecordEqual` in `go.opentelemetry.io/otel/log/logtest` checks `Record.EventName`. (#6187) +- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/sdk/log`. (#6193) +- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest`. (#6193) +- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6211) +- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6211) +- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` (#6210) +- The `go.opentelemetry.io/otel/semconv/v1.28.0` package. + The package contains semantic conventions from the `v1.28.0` version of the OpenTelemetry Semantic Conventions. 
+ See the [migration documentation](./semconv/v1.28.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.27.0`(#6236) +- The `go.opentelemetry.io/otel/semconv/v1.30.0` package. + The package contains semantic conventions from the `v1.30.0` version of the OpenTelemetry Semantic Conventions. + See the [migration documentation](./semconv/v1.30.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.28.0`(#6240) +- Document the pitfalls of using `Resource` as a comparable type. + `Resource.Equal` and `Resource.Equivalent` should be used instead. (#6272) +- Support [Go 1.24]. (#6304) +- Add `FilterProcessor` and `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. + It replaces `go.opentelemetry.io/otel/sdk/log/internal/x.FilterProcessor`. + Compared to previous version it additionally gives the possibility to filter by resource and instrumentation scope. (#6317) + +### Changed + +- Update `github.com/prometheus/common` to `v0.62.0`, which changes the `NameValidationScheme` to `NoEscaping`. + This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores. + This is controlled by the `Content-Type` header, or can be reverted by setting `NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6198) + +### Fixes + +- Eliminate goroutine leak for the processor returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `Shutdown` is called and the passed `ctx` is canceled and `SpanExporter.Shutdown` has not returned. (#6368) +- Eliminate goroutine leak for the processor returned by `NewBatchSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `ForceFlush` is called and the passed `ctx` is canceled and `SpanExporter.Export` has not returned. (#6369) + +## [1.34.0/0.56.0/0.10.0] 2025-01-17 + +### Changed + +- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167) + +### Fixed + +- Relax minimum Go version to 1.22.0 in various modules. (#6073) +- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143) +- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlphttpgrpc` client is corrected from `otlphttphttp` to `otlptracehttp`. (#6143) + ## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12 ### Added @@ -37,9 +92,6 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm - Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997) - Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032) - - - ## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08 ### Added @@ -3185,7 +3237,9 @@ It contains api and sdk for trace and meter. - CircleCI build CI manifest files. - CODEOWNERS file to track owners of this project. 
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.33.0...HEAD +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.35.0...HEAD +[1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0 +[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 [1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 [1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0 [1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0 @@ -3275,6 +3329,7 @@ It contains api and sdk for trace and meter. +[Go 1.24]: https://go.dev/doc/go1.24 [Go 1.23]: https://go.dev/doc/go1.23 [Go 1.22]: https://go.dev/doc/go1.22 [Go 1.21]: https://go.dev/doc/go1.21 diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md index 22a2e9dbd..7b8af585a 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -181,6 +181,18 @@ patterns in the spec. For a deeper discussion, see [this](https://github.com/open-telemetry/opentelemetry-specification/issues/165). +## Tests + +Each functionality should be covered by tests. + +Performance-critical functionality should also be covered by benchmarks. + +- Pull requests adding a performance-critical functionality +should have `go test -bench` output in their description. +- Pull requests changing a performance-critical functionality +should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) +output in their description. + ## Documentation Each (non-internal, non-test) package must be documented using diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/Makefile b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/Makefile index a7f6d8cc6..226410d74 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/Makefile +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/Makefile @@ -11,6 +11,10 @@ ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} GO = go TIMEOUT = 60 +# User to run as in docker images. +DOCKER_USER=$(shell id -u):$(shell id -g) +DEPENDENCIES_DOCKERFILE=./dependencies.Dockerfile + .DEFAULT_GOAL := precommit .PHONY: precommit ci @@ -81,20 +85,20 @@ PIP := $(PYTOOLS)/pip WORKDIR := /workdir # The python image to use for the virtual environment. -PYTHONIMAGE := python:3.11.3-slim-bullseye +PYTHONIMAGE := $(shell awk '$$4=="python" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) # Run the python image with the current directory mounted. -DOCKERPY := docker run --rm -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) +DOCKERPY := docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE) # Create a virtual environment for Python tools. $(PYTOOLS): # The `--upgrade` flag is needed to ensure that the virtual environment is # created with the latest pip version. - @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" + @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade --cache-dir=$(WORKDIR)/.cache/pip pip" # Install python packages into the virtual environment. 
$(PYTOOLS)/%: $(PYTOOLS) - @$(DOCKERPY) $(PIP) install -r requirements.txt + @$(DOCKERPY) $(PIP) install --cache-dir=$(WORKDIR)/.cache/pip -r requirements.txt CODESPELL = $(PYTOOLS)/codespell $(CODESPELL): PACKAGE=codespell @@ -119,7 +123,7 @@ vanity-import-fix: $(PORTO) # Generate go.work file for local development. .PHONY: go-work go-work: $(CROSSLINK) - $(CROSSLINK) work --root=$(shell pwd) + $(CROSSLINK) work --root=$(shell pwd) --go=1.22.7 # Build @@ -265,13 +269,30 @@ check-clean-work-tree: exit 1; \ fi +# The weaver docker image to use for semconv-generate. +WEAVER_IMAGE := $(shell awk '$$4=="weaver" {print $$2}' $(DEPENDENCIES_DOCKERFILE)) + SEMCONVPKG ?= "semconv/" .PHONY: semconv-generate -semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT) +semconv-generate: $(SEMCONVKIT) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) - [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -z "$(SEMCONVPKG)/capitalizations.txt" -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" - $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)" + # Ensure the target directory for source code is available. + mkdir -p $(PWD)/$(SEMCONVPKG)/${TAG} + # Note: We mount a home directory for downloading/storing the semconv repository. + # Weaver will automatically clean the cache when finished, but the directories will remain. + mkdir -p ~/.weaver + docker run --rm \ + -u $(DOCKER_USER) \ + --env HOME=/tmp/weaver \ + --mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \ + --mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \ + --mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \ + $(WEAVER_IMAGE) registry generate \ + --registry=https://github.com/open-telemetry/semantic-conventions/archive/refs/tags/$(TAG).zip[model] \ + --templates=/home/weaver/templates \ + --param tag=$(TAG) \ + go \ + /home/weaver/target $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" .PHONY: gorelease diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/README.md b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/README.md index efec27890..8421cd7e5 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/README.md +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/README.md @@ -1,9 +1,11 @@ # OpenTelemetry-Go -[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![ci](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml) [![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) [![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) [![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) +[![OpenSSF 
Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-go/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go) +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996/badge)](https://www.bestpractices.dev/projects/9996) [![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). @@ -49,18 +51,25 @@ Currently, this project supports the following environments. | OS | Go Version | Architecture | |----------|------------|--------------| +| Ubuntu | 1.24 | amd64 | | Ubuntu | 1.23 | amd64 | | Ubuntu | 1.22 | amd64 | +| Ubuntu | 1.24 | 386 | | Ubuntu | 1.23 | 386 | | Ubuntu | 1.22 | 386 | -| Linux | 1.23 | arm64 | -| Linux | 1.22 | arm64 | +| Ubuntu | 1.24 | arm64 | +| Ubuntu | 1.23 | arm64 | +| Ubuntu | 1.22 | arm64 | +| macOS 13 | 1.24 | amd64 | | macOS 13 | 1.23 | amd64 | | macOS 13 | 1.22 | amd64 | +| macOS | 1.24 | arm64 | | macOS | 1.23 | arm64 | | macOS | 1.22 | arm64 | +| Windows | 1.24 | amd64 | | Windows | 1.23 | amd64 | | Windows | 1.22 | amd64 | +| Windows | 1.24 | 386 | | Windows | 1.23 | 386 | | Windows | 1.22 | 386 | diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/RELEASING.md b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/RELEASING.md index ffa9b6125..1e13ae54f 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/RELEASING.md +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/RELEASING.md @@ -5,17 +5,14 @@ New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated. The `semconv-generate` make target is used for this. -1. Checkout a local copy of the [OpenTelemetry Semantic Conventions] to the desired release tag. -2. Pull the latest `otel/semconvgen` image: `docker pull otel/semconvgen:latest` -3. Run the `make semconv-generate ...` target from this repository. +1. Set the `TAG` environment variable to the semantic convention tag you want to generate. +2. Run the `make semconv-generate ...` target from this repository. For example, ```sh -export TAG="v1.21.0" # Change to the release version you are generating. -export OTEL_SEMCONV_REPO="/absolute/path/to/opentelemetry/semantic-conventions" -docker pull otel/semconvgen:latest -make semconv-generate # Uses the exported TAG and OTEL_SEMCONV_REPO. +export TAG="v1.30.0" # Change to the release version you are generating. +make semconv-generate # Uses the exported TAG. ``` This should create a new sub-package of [`semconv`](./semconv). 
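Once the generator has produced the new sub-package, downstream code consumes it like any other `semconv` version. A minimal sketch, assuming the generated `v1.30.0` package keeps the shape of earlier versions (a `SchemaURL` constant plus typed attribute helpers such as `ServiceName`):

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.30.0"
)

func main() {
	// Describe a service using semantic-convention attributes and the
	// schema URL pinned by the generated package.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceName("linuxkit"),
		semconv.ServiceVersion("v1.0.0"),
	)
	fmt.Println(res.SchemaURL())
	fmt.Println(res.Attributes())
}
```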
@@ -130,6 +127,6 @@ Importantly, bump any package versions referenced to be the latest one you just Bump the dependencies in the following Go services: -- [`accountingservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accountingservice) -- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkoutservice) -- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/productcatalogservice) +- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) +- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) +- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog) diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile new file mode 100644 index 000000000..e4c4a753c --- /dev/null +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile @@ -0,0 +1,3 @@ +# This is a renovate-friendly source of Docker images. +FROM python:3.13.2-slim-bullseye@sha256:31b581c8218e1f3c58672481b3b7dba8e898852866b408c6a984c22832523935 AS python +FROM otel/weaver:v0.13.2@sha256:ae7346b992e477f629ea327e0979e8a416a97f7956ab1f7e95ac1f44edf1a893 AS weaver diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go index f6dd3decc..2e7690e43 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -13,7 +13,8 @@ func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationSco return nil } return &commonpb.InstrumentationScope{ - Name: il.Name, - Version: il.Version, + Name: il.Name, + Version: il.Version, + Attributes: Iterator(il.Attributes.Iter()), } } diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go index 709c8f70a..f5cad46b7 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go @@ -5,5 +5,5 @@ package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" // Version is the current release version of the OpenTelemetry OTLP trace exporter in use. 
func Version() string { - return "1.31.0" + return "1.35.0" } diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/renovate.json b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/renovate.json index 0a29a2f13..a6fa353f9 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/renovate.json +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/renovate.json @@ -1,7 +1,7 @@ { "$schema": "https://docs.renovatebot.com/renovate-schema.json", "extends": [ - "config:recommended" + "config:best-practices" ], "ignorePaths": [], "labels": ["Skip Changelog", "dependencies"], @@ -15,10 +15,8 @@ "enabled": true }, { - "matchFileNames": ["internal/tools/**"], - "matchManagers": ["gomod"], - "matchDepTypes": ["indirect"], - "enabled": false + "matchPackageNames": ["go.opentelemetry.io/build-tools/**"], + "groupName": "build-tools" }, { "matchPackageNames": ["google.golang.org/genproto/googleapis/**"], diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/requirements.txt b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/requirements.txt index ab09daf9d..1bb55fb1c 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/requirements.txt +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/requirements.txt @@ -1 +1 @@ -codespell==2.3.0 +codespell==2.4.1 diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go index ce455dc54..3d703c5d9 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go @@ -5,6 +5,7 @@ package resource // import "go.opentelemetry.io/otel/sdk/resource" import ( "encoding/xml" + "errors" "fmt" "io" "os" @@ -63,7 +64,7 @@ func parsePlistFile(file io.Reader) (map[string]string, error) { } if len(v.Dict.Key) != len(v.Dict.String) { - return nil, fmt.Errorf("the number of <key> and <string> elements doesn't match") + return nil, errors.New("the number of <key> and <string> elements doesn't match") } properties := make(map[string]string, len(v.Dict.Key)) diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go index ad4b50df4..09b91e1e1 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -21,11 +21,22 @@ import ( // Resources should be passed and stored as pointers // (`*resource.Resource`). The `nil` value is equivalent to an empty // Resource. +// +// Note that the Go == operator compares not just the resource attributes but +// also all other internals of the Resource type. Therefore, Resource values +// should not be used as map or database keys. In general, the [Resource.Equal] +// method should be used instead of direct comparison with ==, since that +// method ensures the correct comparison of resource attributes, and the +// [attribute.Distinct] returned from [Resource.Equivalent] should be used for +// map and database keys instead. type Resource struct { attrs attribute.Set schemaURL string } +// Compile-time check that the Resource remains comparable. +var _ map[Resource]struct{} = nil + var ( defaultResource *Resource defaultResourceOnce sync.Once @@ -137,15 +148,19 @@ func (r *Resource) Iter() attribute.Iterator { return r.attrs.Iter() } -// Equal returns true when a Resource is equivalent to this Resource.
-func (r *Resource) Equal(eq *Resource) bool { +// Equal returns whether r and o represent the same resource. Two resources can +// be equal even if they have different schema URLs. +// +// See the documentation on the [Resource] type for the pitfalls of using == +// with Resource values; most code should use Equal instead. +func (r *Resource) Equal(o *Resource) bool { if r == nil { r = Empty() } - if eq == nil { - eq = Empty() + if o == nil { + o = Empty() } - return r.Equivalent() == eq.Equivalent() + return r.Equivalent() == o.Equivalent() } // Merge creates a new [Resource] by merging a and b. diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go index ccc97e1b6..6872cbb4e 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go @@ -201,10 +201,9 @@ func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error { } } - wait := make(chan error) + wait := make(chan error, 1) go func() { wait <- bsp.exportSpans(ctx) - close(wait) }() // Wait until the export is finished or the context is cancelled/timed out select { diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go index ebb6df6c9..aa7b262d0 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go @@ -47,12 +47,12 @@ const ( // Drop will not record the span and all attributes/events will be dropped. Drop SamplingDecision = iota - // Record indicates the span's `IsRecording() == true`, but `Sampled` flag - // *must not* be set. + // RecordOnly indicates the span's IsRecording method returns true, but trace.FlagsSampled flag + // must not be set. RecordOnly - // RecordAndSample has span's `IsRecording() == true` and `Sampled` flag - // *must* be set. + // RecordAndSample indicates the span's IsRecording method returns true and trace.FlagsSampled flag + // must be set. RecordAndSample ) diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go index 554111bb4..664e13e03 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go @@ -58,7 +58,7 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error { var err error ssp.stopOnce.Do(func() { stopFunc := func(exp SpanExporter) (<-chan error, func()) { - done := make(chan error) + done := make(chan error, 1) return done, func() { done <- exp.Shutdown(ctx) } } diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/version.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/version.go index ba7db4889..2b797fbde 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/version.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/sdk/version.go @@ -5,5 +5,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk" // Version is the current release version of the OpenTelemetry SDK in use. 
func Version() string { - return "1.33.0" + return "1.35.0" } diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/auto.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/auto.go new file mode 100644 index 000000000..7e2910025 --- /dev/null +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/auto.go @@ -0,0 +1,661 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + "encoding/json" + "fmt" + "math" + "os" + "reflect" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf8" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" + "go.opentelemetry.io/otel/trace/embedded" + "go.opentelemetry.io/otel/trace/internal/telemetry" +) + +// newAutoTracerProvider returns an auto-instrumentable [trace.TracerProvider]. +// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument +// the process using the returned TracerProvider, all of the telemetry it +// produces will be processed and handled by that Instrumentation. By default, +// if no Instrumentation instruments the TracerProvider it will not generate +// any trace telemetry. +func newAutoTracerProvider() TracerProvider { return tracerProviderInstance } + +var tracerProviderInstance = new(autoTracerProvider) + +type autoTracerProvider struct{ embedded.TracerProvider } + +var _ TracerProvider = autoTracerProvider{} + +func (p autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer { + cfg := NewTracerConfig(opts...) + return autoTracer{ + name: name, + version: cfg.InstrumentationVersion(), + schemaURL: cfg.SchemaURL(), + } +} + +type autoTracer struct { + embedded.Tracer + + name, schemaURL, version string +} + +var _ Tracer = autoTracer{} + +func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOption) (context.Context, Span) { + var psc SpanContext + sampled := true + span := new(autoSpan) + + // Ask eBPF for sampling decision and span context info. + t.start(ctx, span, &psc, &sampled, &span.spanContext) + + span.sampled.Store(sampled) + + ctx = ContextWithSpan(ctx, span) + + if sampled { + // Only build traces if sampled. + cfg := NewSpanStartConfig(opts...) + span.traces, span.span = t.traces(name, cfg, span.spanContext, psc) + } + + return ctx, span +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (t *autoTracer) start( + ctx context.Context, + spanPtr *autoSpan, + psc *SpanContext, + sampled *bool, + sc *SpanContext, +) { + start(ctx, spanPtr, psc, sampled, sc) +} + +// start is used for testing. +var start = func(context.Context, *autoSpan, *SpanContext, *bool, *SpanContext) {} + +func (t autoTracer) traces(name string, cfg SpanConfig, sc, psc SpanContext) (*telemetry.Traces, *telemetry.Span) { + span := &telemetry.Span{ + TraceID: telemetry.TraceID(sc.TraceID()), + SpanID: telemetry.SpanID(sc.SpanID()), + Flags: uint32(sc.TraceFlags()), + TraceState: sc.TraceState().String(), + ParentSpanID: telemetry.SpanID(psc.SpanID()), + Name: name, + Kind: spanKind(cfg.SpanKind()), + } + + span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes()) + + links := cfg.Links() + if limit := maxSpan.Links; limit == 0 { + n := int64(len(links)) + if n > 0 { + span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. 
+ } + } else { + if limit > 0 { + n := int64(max(len(links)-limit, 0)) + span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. + links = links[n:] + } + span.Links = convLinks(links) + } + + if t := cfg.Timestamp(); !t.IsZero() { + span.StartTime = cfg.Timestamp() + } else { + span.StartTime = time.Now() + } + + return &telemetry.Traces{ + ResourceSpans: []*telemetry.ResourceSpans{ + { + ScopeSpans: []*telemetry.ScopeSpans{ + { + Scope: &telemetry.Scope{ + Name: t.name, + Version: t.version, + }, + Spans: []*telemetry.Span{span}, + SchemaURL: t.schemaURL, + }, + }, + }, + }, + }, span +} + +func spanKind(kind SpanKind) telemetry.SpanKind { + switch kind { + case SpanKindInternal: + return telemetry.SpanKindInternal + case SpanKindServer: + return telemetry.SpanKindServer + case SpanKindClient: + return telemetry.SpanKindClient + case SpanKindProducer: + return telemetry.SpanKindProducer + case SpanKindConsumer: + return telemetry.SpanKindConsumer + } + return telemetry.SpanKind(0) // undefined. +} + +type autoSpan struct { + embedded.Span + + spanContext SpanContext + sampled atomic.Bool + + mu sync.Mutex + traces *telemetry.Traces + span *telemetry.Span +} + +func (s *autoSpan) SpanContext() SpanContext { + if s == nil { + return SpanContext{} + } + // s.spanContext is immutable, do not acquire lock s.mu. + return s.spanContext +} + +func (s *autoSpan) IsRecording() bool { + if s == nil { + return false + } + + return s.sampled.Load() +} + +func (s *autoSpan) SetStatus(c codes.Code, msg string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + if s.span.Status == nil { + s.span.Status = new(telemetry.Status) + } + + s.span.Status.Message = msg + + switch c { + case codes.Unset: + s.span.Status.Code = telemetry.StatusCodeUnset + case codes.Error: + s.span.Status.Code = telemetry.StatusCodeError + case codes.Ok: + s.span.Status.Code = telemetry.StatusCodeOK + } +} + +func (s *autoSpan) SetAttributes(attrs ...attribute.KeyValue) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + limit := maxSpan.Attrs + if limit == 0 { + // No attributes allowed. + n := int64(len(attrs)) + if n > 0 { + s.span.DroppedAttrs += uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked. + } + return + } + + m := make(map[string]int) + for i, a := range s.span.Attrs { + m[a.Key] = i + } + + for _, a := range attrs { + val := convAttrValue(a.Value) + if val.Empty() { + s.span.DroppedAttrs++ + continue + } + + if idx, ok := m[string(a.Key)]; ok { + s.span.Attrs[idx] = telemetry.Attr{ + Key: string(a.Key), + Value: val, + } + } else if limit < 0 || len(s.span.Attrs) < limit { + s.span.Attrs = append(s.span.Attrs, telemetry.Attr{ + Key: string(a.Key), + Value: val, + }) + m[string(a.Key)] = len(s.span.Attrs) - 1 + } else { + s.span.DroppedAttrs++ + } + } +} + +// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The +// number of dropped attributes is also returned. +func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { + n := len(attrs) + if limit == 0 { + var out uint32 + if n > 0 { + out = uint32(min(int64(n), math.MaxUint32)) // nolint: gosec // Bounds checked. + } + return nil, out + } + + if limit < 0 { + // Unlimited. + return convAttrs(attrs), 0 + } + + if n < 0 { + n = 0 + } + + limit = min(n, limit) + return convAttrs(attrs[:limit]), uint32(n - limit) // nolint: gosec // Bounds checked. 
+} + +func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { + if len(attrs) == 0 { + // Avoid allocations if not necessary. + return nil + } + + out := make([]telemetry.Attr, 0, len(attrs)) + for _, attr := range attrs { + key := string(attr.Key) + val := convAttrValue(attr.Value) + if val.Empty() { + continue + } + out = append(out, telemetry.Attr{Key: key, Value: val}) + } + return out +} + +func convAttrValue(value attribute.Value) telemetry.Value { + switch value.Type() { + case attribute.BOOL: + return telemetry.BoolValue(value.AsBool()) + case attribute.INT64: + return telemetry.Int64Value(value.AsInt64()) + case attribute.FLOAT64: + return telemetry.Float64Value(value.AsFloat64()) + case attribute.STRING: + v := truncate(maxSpan.AttrValueLen, value.AsString()) + return telemetry.StringValue(v) + case attribute.BOOLSLICE: + slice := value.AsBoolSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.BoolValue(v)) + } + return telemetry.SliceValue(out...) + case attribute.INT64SLICE: + slice := value.AsInt64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Int64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.FLOAT64SLICE: + slice := value.AsFloat64Slice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + out = append(out, telemetry.Float64Value(v)) + } + return telemetry.SliceValue(out...) + case attribute.STRINGSLICE: + slice := value.AsStringSlice() + out := make([]telemetry.Value, 0, len(slice)) + for _, v := range slice { + v = truncate(maxSpan.AttrValueLen, v) + out = append(out, telemetry.StringValue(v)) + } + return telemetry.SliceValue(out...) + } + return telemetry.Value{} +} + +// truncate returns a truncated version of s such that it contains less than +// the limit number of characters. Truncation is applied by returning the limit +// number of valid characters contained in s. +// +// If limit is negative, it returns the original string. +// +// UTF-8 is supported. When truncating, all invalid characters are dropped +// before applying truncation. +// +// If s already contains less than the limit number of bytes, it is returned +// unchanged. No invalid characters are removed. +func truncate(limit int, s string) string { + // This prioritize performance in the following order based on the most + // common expected use-cases. + // + // - Short values less than the default limit (128). + // - Strings with valid encodings that exceed the limit. + // - No limit. + // - Strings with invalid encodings that exceed the limit. + if limit < 0 || len(s) <= limit { + return s + } + + // Optimistically, assume all valid UTF-8. + var b strings.Builder + count := 0 + for i, c := range s { + if c != utf8.RuneError { + count++ + if count > limit { + return s[:i] + } + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // Invalid encoding. + b.Grow(len(s) - 1) + _, _ = b.WriteString(s[:i]) + s = s[i:] + break + } + } + + // Fast-path, no invalid input. + if b.Cap() == 0 { + return s + } + + // Truncate while validating UTF-8. + for i := 0; i < len(s) && count < limit; { + c := s[i] + if c < utf8.RuneSelf { + // Optimization for single byte runes (common case). + _ = b.WriteByte(c) + i++ + count++ + continue + } + + _, size := utf8.DecodeRuneInString(s[i:]) + if size == 1 { + // We checked for all 1-byte runes above, this is a RuneError. 
+ i++ + continue + } + + _, _ = b.WriteString(s[i : i+size]) + i += size + count++ + } + + return b.String() +} + +func (s *autoSpan) End(opts ...SpanEndOption) { + if s == nil || !s.sampled.Swap(false) { + return + } + + // s.end exists so the lock (s.mu) is not held while s.ended is called. + s.ended(s.end(opts)) +} + +func (s *autoSpan) end(opts []SpanEndOption) []byte { + s.mu.Lock() + defer s.mu.Unlock() + + cfg := NewSpanEndConfig(opts...) + if t := cfg.Timestamp(); !t.IsZero() { + s.span.EndTime = cfg.Timestamp() + } else { + s.span.EndTime = time.Now() + } + + b, _ := json.Marshal(s.traces) // TODO: do not ignore this error. + return b +} + +// Expected to be implemented in eBPF. +// +//go:noinline +func (*autoSpan) ended(buf []byte) { ended(buf) } + +// ended is used for testing. +var ended = func([]byte) {} + +func (s *autoSpan) RecordError(err error, opts ...EventOption) { + if s == nil || err == nil || !s.sampled.Load() { + return + } + + cfg := NewEventConfig(opts...) + + attrs := cfg.Attributes() + attrs = append(attrs, + semconv.ExceptionType(typeStr(err)), + semconv.ExceptionMessage(err.Error()), + ) + if cfg.StackTrace() { + buf := make([]byte, 2048) + n := runtime.Stack(buf, false) + attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n]))) + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs) +} + +func typeStr(i any) string { + t := reflect.TypeOf(i) + if t.PkgPath() == "" && t.Name() == "" { + // Likely a builtin type. + return t.String() + } + return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name()) +} + +func (s *autoSpan) AddEvent(name string, opts ...EventOption) { + if s == nil || !s.sampled.Load() { + return + } + + cfg := NewEventConfig(opts...) + + s.mu.Lock() + defer s.mu.Unlock() + + s.addEvent(name, cfg.Timestamp(), cfg.Attributes()) +} + +// addEvent adds an event with name and attrs at tStamp to the span. The span +// lock (s.mu) needs to be held by the caller. +func (s *autoSpan) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) { + limit := maxSpan.Events + + if limit == 0 { + s.span.DroppedEvents++ + return + } + + if limit > 0 && len(s.span.Events) == limit { + // Drop head while avoiding allocation of more capacity. + copy(s.span.Events[:limit-1], s.span.Events[1:]) + s.span.Events = s.span.Events[:limit-1] + s.span.DroppedEvents++ + } + + e := &telemetry.SpanEvent{Time: tStamp, Name: name} + e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs) + + s.span.Events = append(s.span.Events, e) +} + +func (s *autoSpan) AddLink(link Link) { + if s == nil || !s.sampled.Load() { + return + } + + l := maxSpan.Links + + s.mu.Lock() + defer s.mu.Unlock() + + if l == 0 { + s.span.DroppedLinks++ + return + } + + if l > 0 && len(s.span.Links) == l { + // Drop head while avoiding allocation of more capacity. 
+ copy(s.span.Links[:l-1], s.span.Links[1:]) + s.span.Links = s.span.Links[:l-1] + s.span.DroppedLinks++ + } + + s.span.Links = append(s.span.Links, convLink(link)) +} + +func convLinks(links []Link) []*telemetry.SpanLink { + out := make([]*telemetry.SpanLink, 0, len(links)) + for _, link := range links { + out = append(out, convLink(link)) + } + return out +} + +func convLink(link Link) *telemetry.SpanLink { + l := &telemetry.SpanLink{ + TraceID: telemetry.TraceID(link.SpanContext.TraceID()), + SpanID: telemetry.SpanID(link.SpanContext.SpanID()), + TraceState: link.SpanContext.TraceState().String(), + Flags: uint32(link.SpanContext.TraceFlags()), + } + l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes) + + return l +} + +func (s *autoSpan) SetName(name string) { + if s == nil || !s.sampled.Load() { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + + s.span.Name = name +} + +func (*autoSpan) TracerProvider() TracerProvider { return newAutoTracerProvider() } + +// maxSpan are the span limits resolved during startup. +var maxSpan = newSpanLimits() + +type spanLimits struct { + // Attrs is the number of allowed attributes for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if + // that is not set, is used. + Attrs int + // AttrValueLen is the maximum attribute value length allowed for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the + // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1 + // if that is not set, is used. + AttrValueLen int + // Events is the number of allowed events for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set. + Events int + // EventAttrs is the number of allowed attributes for a span event. + // + // The is resolved from the environment variable value for the + // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set. + EventAttrs int + // Links is the number of allowed Links for a span. + // + // This is resolved from the environment variable value for the + // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set. + Links int + // LinkAttrs is the number of allowed attributes for a span link. + // + // This is resolved from the environment variable value for the + // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set. + LinkAttrs int +} + +func newSpanLimits() spanLimits { + return spanLimits{ + Attrs: firstEnv( + 128, + "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", + "OTEL_ATTRIBUTE_COUNT_LIMIT", + ), + AttrValueLen: firstEnv( + -1, // Unlimited. + "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT", + "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT", + ), + Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"), + EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"), + Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"), + LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"), + } +} + +// firstEnv returns the parsed integer value of the first matching environment +// variable from keys. The defaultVal is returned if the value is not an +// integer or no match is found. 
+func firstEnv(defaultVal int, keys ...string) int { + for _, key := range keys { + strV := os.Getenv(key) + if strV == "" { + continue + } + + v, err := strconv.Atoi(strV) + if err == nil { + return v + } + // Ignore invalid environment variable. + } + + return defaultVal +} diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go new file mode 100644 index 000000000..f663547b4 --- /dev/null +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go @@ -0,0 +1,58 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +// Attr is a key-value pair. +type Attr struct { + Key string `json:"key,omitempty"` + Value Value `json:"value,omitempty"` +} + +// String returns an Attr for a string value. +func String(key, value string) Attr { + return Attr{key, StringValue(value)} +} + +// Int64 returns an Attr for an int64 value. +func Int64(key string, value int64) Attr { + return Attr{key, Int64Value(value)} +} + +// Int returns an Attr for an int value. +func Int(key string, value int) Attr { + return Int64(key, int64(value)) +} + +// Float64 returns an Attr for a float64 value. +func Float64(key string, value float64) Attr { + return Attr{key, Float64Value(value)} +} + +// Bool returns an Attr for a bool value. +func Bool(key string, value bool) Attr { + return Attr{key, BoolValue(value)} +} + +// Bytes returns an Attr for a []byte value. +// The passed slice must not be changed after it is passed. +func Bytes(key string, value []byte) Attr { + return Attr{key, BytesValue(value)} +} + +// Slice returns an Attr for a []Value value. +// The passed slice must not be changed after it is passed. +func Slice(key string, value ...Value) Attr { + return Attr{key, SliceValue(value...)} +} + +// Map returns an Attr for a map value. +// The passed slice must not be changed after it is passed. +func Map(key string, value ...Attr) Attr { + return Attr{key, MapValue(value...)} +} + +// Equal returns if a is equal to b. +func (a Attr) Equal(b Attr) bool { + return a.Key == b.Key && a.Value.Equal(b.Value) +} diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go new file mode 100644 index 000000000..5debe90bb --- /dev/null +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go @@ -0,0 +1,8 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +/* +Package telemetry provides a lightweight representations of OpenTelemetry +telemetry that is compatible with the OTLP JSON protobuf encoding. 
+*/ +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go new file mode 100644 index 000000000..7b1ae3c4e --- /dev/null +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go @@ -0,0 +1,103 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "encoding/hex" + "errors" + "fmt" +) + +const ( + traceIDSize = 16 + spanIDSize = 8 +) + +// TraceID is a custom data type that is used for all trace IDs. +type TraceID [traceIDSize]byte + +// String returns the hex string representation form of a TraceID. +func (tid TraceID) String() string { + return hex.EncodeToString(tid[:]) +} + +// IsEmpty returns false if id contains at least one non-zero byte. +func (tid TraceID) IsEmpty() bool { + return tid == [traceIDSize]byte{} +} + +// MarshalJSON converts the trace ID into a hex string enclosed in quotes. +func (tid TraceID) MarshalJSON() ([]byte, error) { + if tid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(tid[:]) +} + +// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in +// quotes. +func (tid *TraceID) UnmarshalJSON(data []byte) error { + *tid = [traceIDSize]byte{} + return unmarshalJSON(tid[:], data) +} + +// SpanID is a custom data type that is used for all span IDs. +type SpanID [spanIDSize]byte + +// String returns the hex string representation form of a SpanID. +func (sid SpanID) String() string { + return hex.EncodeToString(sid[:]) +} + +// IsEmpty returns true if the span ID contains at least one non-zero byte. +func (sid SpanID) IsEmpty() bool { + return sid == [spanIDSize]byte{} +} + +// MarshalJSON converts span ID into a hex string enclosed in quotes. +func (sid SpanID) MarshalJSON() ([]byte, error) { + if sid.IsEmpty() { + return []byte(`""`), nil + } + return marshalJSON(sid[:]) +} + +// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes. +func (sid *SpanID) UnmarshalJSON(data []byte) error { + *sid = [spanIDSize]byte{} + return unmarshalJSON(sid[:], data) +} + +// marshalJSON converts id into a hex string enclosed in quotes. +func marshalJSON(id []byte) ([]byte, error) { + // Plus 2 quote chars at the start and end. + hexLen := hex.EncodedLen(len(id)) + 2 + + b := make([]byte, hexLen) + hex.Encode(b[1:hexLen-1], id) + b[0], b[hexLen-1] = '"', '"' + + return b, nil +} + +// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. 
+func unmarshalJSON(dst []byte, src []byte) error { + if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { + src = src[1 : l-1] + } + nLen := len(src) + if nLen == 0 { + return nil + } + + if len(dst) != hex.DecodedLen(nLen) { + return errors.New("invalid length for ID") + } + + _, err := hex.Decode(dst, src) + if err != nil { + return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err) + } + return nil +} diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go new file mode 100644 index 000000000..f5e3a8cec --- /dev/null +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "encoding/json" + "strconv" +) + +// protoInt64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoInt64 int64 + +// Int64 returns the protoInt64 as an int64. +func (i *protoInt64) Int64() int64 { return int64(*i) } + +// UnmarshalJSON decodes both strings and integers. +func (i *protoInt64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedInt, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return err + } + *i = protoInt64(parsedInt) + } else { + var parsedInt int64 + if err := json.Unmarshal(data, &parsedInt); err != nil { + return err + } + *i = protoInt64(parsedInt) + } + return nil +} + +// protoUint64 represents the protobuf encoding of integers which can be either +// strings or integers. +type protoUint64 uint64 + +// Int64 returns the protoUint64 as a uint64. +func (i *protoUint64) Uint64() uint64 { return uint64(*i) } + +// UnmarshalJSON decodes both strings and integers. +func (i *protoUint64) UnmarshalJSON(data []byte) error { + if data[0] == '"' { + var str string + if err := json.Unmarshal(data, &str); err != nil { + return err + } + parsedUint, err := strconv.ParseUint(str, 10, 64) + if err != nil { + return err + } + *i = protoUint64(parsedUint) + } else { + var parsedUint uint64 + if err := json.Unmarshal(data, &parsedUint); err != nil { + return err + } + *i = protoUint64(parsedUint) + } + return nil +} diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go new file mode 100644 index 000000000..1798a702d --- /dev/null +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Resource information. +type Resource struct { + // Attrs are the set of attributes that describe the resource. Attribute + // keys MUST be unique (it is not allowed to have more than one attribute + // with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // DroppedAttrs is the number of dropped attributes. If the value + // is 0, then no attributes were dropped. 
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. +func (r *Resource) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Resource type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Resource field: %#v", keyIface) + } + + switch key { + case "attributes": + err = decoder.Decode(&r.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&r.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go new file mode 100644 index 000000000..c2b4c635b --- /dev/null +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Scope is the identifying values of the instrumentation scope. +type Scope struct { + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` + Attrs []Attr `json:"attributes,omitempty"` + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r. +func (s *Scope) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Scope type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Scope field: %#v", keyIface) + } + + switch key { + case "name": + err = decoder.Decode(&s.Name) + case "version": + err = decoder.Decode(&s.Version) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go new file mode 100644 index 000000000..3c5e1cdb1 --- /dev/null +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go @@ -0,0 +1,460 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "time" +) + +// A Span represents a single operation performed by a single component of the +// system. +type Span struct { + // A unique identifier for a trace. 
All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR + // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes OR of length + // other than 8 bytes is considered invalid (empty string in OTLP/JSON + // is zero-length and thus is also invalid). + // + // This field is required. + SpanID SpanID `json:"spanId,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `json:"traceState,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanID SpanID `json:"parentSpanId,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether a span's parent + // is remote. The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // When creating span messages, if the message is logically forwarded from another source + // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD + // be copied as-is. If creating from a source that does not have an equivalent flags field + // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST + // be set to zero. + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `json:"name"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind SpanKind `json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. 
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTime time.Time `json:"startTimeUnixNano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + EndTime time.Time `json:"endTimeUnixNano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "example.com/myattribute": true + // "example.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // events is a collection of Event items. + Events []*SpanEvent `json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEvents uint32 `json:"droppedEventsCount,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*SpanLink `json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinks uint32 `json:"droppedLinksCount,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `json:"status,omitempty"` +} + +// MarshalJSON encodes s into OTLP formatted JSON. +func (s Span) MarshalJSON() ([]byte, error) { + startT := s.StartTime.UnixNano() + if s.StartTime.IsZero() || startT < 0 { + startT = 0 + } + + endT := s.EndTime.UnixNano() + if s.EndTime.IsZero() || endT < 0 { + endT = 0 + } + + // Override non-empty default SpanID marshal and omitempty. + var parentSpanId string + if !s.ParentSpanID.IsEmpty() { + b := make([]byte, hex.EncodedLen(spanIDSize)) + hex.Encode(b, s.ParentSpanID[:]) + parentSpanId = string(b) + } + + type Alias Span + return json.Marshal(struct { + Alias + ParentSpanID string `json:"parentSpanId,omitempty"` + StartTime uint64 `json:"startTimeUnixNano,omitempty"` + EndTime uint64 `json:"endTimeUnixNano,omitempty"` + }{ + Alias: Alias(s), + ParentSpanID: parentSpanId, + StartTime: uint64(startT), // nolint:gosec // >0 checked above. 
+ EndTime: uint64(endT), // nolint:gosec // >0 checked above. + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s. +func (s *Span) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Span type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Span field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&s.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&s.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&s.TraceState) + case "parentSpanId", "parent_span_id": + err = decoder.Decode(&s.ParentSpanID) + case "flags": + err = decoder.Decode(&s.Flags) + case "name": + err = decoder.Decode(&s.Name) + case "kind": + err = decoder.Decode(&s.Kind) + case "startTimeUnixNano", "start_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. + s.StartTime = time.Unix(0, v) + case "endTimeUnixNano", "end_time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. + s.EndTime = time.Unix(0, v) + case "attributes": + err = decoder.Decode(&s.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&s.DroppedAttrs) + case "events": + err = decoder.Decode(&s.Events) + case "droppedEventsCount", "dropped_events_count": + err = decoder.Decode(&s.DroppedEvents) + case "links": + err = decoder.Decode(&s.Links) + case "droppedLinksCount", "dropped_links_count": + err = decoder.Decode(&s.DroppedLinks) + case "status": + err = decoder.Decode(&s.Status) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// SpanFlags represents constants used to interpret the +// Span.flags field, which is protobuf 'fixed32' type and is to +// be used as bit-fields. Each non-zero value defined in this enum is +// a bit-mask. To extract the bit-field, for example, use an +// expression like: +// +// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK) +// +// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. +// +// Note that Span flags were introduced in version 1.1 of the +// OpenTelemetry protocol. Older Span producers do not set this +// field, consequently consumers should not rely on the absence of a +// particular flag bit to indicate the presence of a particular feature. +type SpanFlags int32 + +const ( + // Bits 0-7 are used for trace flags. + SpanFlagsTraceFlagsMask SpanFlags = 255 + // Bits 8 and 9 are used to indicate that the parent span or link span is remote. + // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. + // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. + SpanFlagsContextHasIsRemoteMask SpanFlags = 256 + // SpanFlagsContextHasIsRemoteMask indicates the Span is remote. + SpanFlagsContextIsRemoteMask SpanFlags = 512 +) + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. 
+type SpanKind int32 + +const ( + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value. + SpanKindInternal SpanKind = 1 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + SpanKindServer SpanKind = 2 + // Indicates that the span describes a request to some remote service. + SpanKindClient SpanKind = 3 + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + SpanKindProducer SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + SpanKindConsumer SpanKind = 5 +) + +// Event is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. +type SpanEvent struct { + // time_unix_nano is the time the event occurred. + Time time.Time `json:"timeUnixNano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` +} + +// MarshalJSON encodes e into OTLP formatted JSON. +func (e SpanEvent) MarshalJSON() ([]byte, error) { + t := e.Time.UnixNano() + if e.Time.IsZero() || t < 0 { + t = 0 + } + + type Alias SpanEvent + return json.Marshal(struct { + Alias + Time uint64 `json:"timeUnixNano,omitempty"` + }{ + Alias: Alias(e), + Time: uint64(t), // nolint: gosec // >0 checked above + }) +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se. +func (se *SpanEvent) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanEvent type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanEvent field: %#v", keyIface) + } + + switch key { + case "timeUnixNano", "time_unix_nano": + var val protoUint64 + err = decoder.Decode(&val) + v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked. + se.Time = time.Unix(0, v) + case "name": + err = decoder.Decode(&se.Name) + case "attributes": + err = decoder.Decode(&se.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&se.DroppedAttrs) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. 
For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type SpanLink struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceID TraceID `json:"traceId,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanID SpanID `json:"spanId,omitempty"` + // The trace_state associated with the link. + TraceState string `json:"traceState,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + // Attribute keys MUST be unique (it is not allowed to have more than one + // attribute with the same key). + Attrs []Attr `json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"` + // Flags, a bit field. + // + // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace + // Context specification. To read the 8-bit W3C trace flag, use + // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`. + // + // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions. + // + // Bits 8 and 9 represent the 3 states of whether the link is remote. + // The states are (unknown, is not remote, is remote). + // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`. + // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`. + // + // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero. + // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero. + // + // [Optional]. + Flags uint32 `json:"flags,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl. +func (sl *SpanLink) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid SpanLink type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid SpanLink field: %#v", keyIface) + } + + switch key { + case "traceId", "trace_id": + err = decoder.Decode(&sl.TraceID) + case "spanId", "span_id": + err = decoder.Decode(&sl.SpanID) + case "traceState", "trace_state": + err = decoder.Decode(&sl.TraceState) + case "attributes": + err = decoder.Decode(&sl.Attrs) + case "droppedAttributesCount", "dropped_attributes_count": + err = decoder.Decode(&sl.DroppedAttrs) + case "flags": + err = decoder.Decode(&sl.Flags) + default: + // Skip unknown. 
+ } + + if err != nil { + return err + } + } + return nil +} diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go new file mode 100644 index 000000000..1d013a8fa --- /dev/null +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go @@ -0,0 +1,40 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type StatusCode int32 + +const ( + // The default status. + StatusCodeUnset StatusCode = 0 + // The Span has been validated by an Application developer or Operator to + // have completed successfully. + StatusCodeOK StatusCode = 1 + // The Span contains an error. + StatusCodeError StatusCode = 2 +) + +var statusCodeStrings = []string{ + "Unset", + "OK", + "Error", +} + +func (s StatusCode) String() string { + if s >= 0 && int(s) < len(statusCodeStrings) { + return statusCodeStrings[s] + } + return "" +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + // A developer-facing human readable error message. + Message string `json:"message,omitempty"` + // The status code. + Code StatusCode `json:"code,omitempty"` +} diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go new file mode 100644 index 000000000..b03940708 --- /dev/null +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go @@ -0,0 +1,189 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" +) + +// Traces represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type Traces struct { + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td. +func (td *Traces) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid TracesData type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. 
+ return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid TracesData field: %#v", keyIface) + } + + switch key { + case "resourceSpans", "resource_spans": + err = decoder.Decode(&td.ResourceSpans) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of ScopeSpans from a Resource. +type ResourceSpans struct { + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource Resource `json:"resource"` + // A list of ScopeSpans that originate from a resource. + ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "scope_spans" field which have their own schema_url field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs. +func (rs *ResourceSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ResourceSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface) + } + + switch key { + case "resource": + err = decoder.Decode(&rs.Resource) + case "scopeSpans", "scope_spans": + err = decoder.Decode(&rs.ScopeSpans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&rs.SchemaURL) + default: + // Skip unknown. + } + + if err != nil { + return err + } + } + return nil +} + +// A collection of Spans produced by an InstrumentationScope. +type ScopeSpans struct { + // The instrumentation scope information for the spans in this message. + // Semantically when InstrumentationScope isn't set, it is equivalent with + // an empty instrumentation scope name (unknown). + Scope *Scope `json:"scope"` + // A list of Spans that originate from an instrumentation scope. + Spans []*Span `json:"spans,omitempty"` + // The Schema URL, if known. This is the identifier of the Schema that the span data + // is recorded in. To learn more about Schema URL see + // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url + // This schema_url applies to all spans and span events in the "spans" field. + SchemaURL string `json:"schemaUrl,omitempty"` +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss. +func (ss *ScopeSpans) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid ScopeSpans type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface) + } + + switch key { + case "scope": + err = decoder.Decode(&ss.Scope) + case "spans": + err = decoder.Decode(&ss.Spans) + case "schemaUrl", "schema_url": + err = decoder.Decode(&ss.SchemaURL) + default: + // Skip unknown. 
+ } + + if err != nil { + return err + } + } + return nil +} diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go new file mode 100644 index 000000000..7251492da --- /dev/null +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go @@ -0,0 +1,453 @@ +// Copyright The OpenTelemetry Authors +// SPDX-License-Identifier: Apache-2.0 + +package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry" + +import ( + "bytes" + "cmp" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "slices" + "strconv" + "unsafe" +) + +// A Value represents a structured value. +// A zero value is valid and represents an empty value. +type Value struct { + // Ensure forward compatibility by explicitly making this not comparable. + noCmp [0]func() //nolint: unused // This is indeed used. + + // num holds the value for Int64, Float64, and Bool. It holds the length + // for String, Bytes, Slice, Map. + num uint64 + // any holds either the KindBool, KindInt64, KindFloat64, stringptr, + // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64 + // then the value of Value is in num as described above. Otherwise, it + // contains the value wrapped in the appropriate type. + any any +} + +type ( + // sliceptr represents a value in Value.any for KindString Values. + stringptr *byte + // bytesptr represents a value in Value.any for KindBytes Values. + bytesptr *byte + // sliceptr represents a value in Value.any for KindSlice Values. + sliceptr *Value + // mapptr represents a value in Value.any for KindMap Values. + mapptr *Attr +) + +// ValueKind is the kind of a [Value]. +type ValueKind int + +// ValueKind values. +const ( + ValueKindEmpty ValueKind = iota + ValueKindBool + ValueKindFloat64 + ValueKindInt64 + ValueKindString + ValueKindBytes + ValueKindSlice + ValueKindMap +) + +var valueKindStrings = []string{ + "Empty", + "Bool", + "Float64", + "Int64", + "String", + "Bytes", + "Slice", + "Map", +} + +func (k ValueKind) String() string { + if k >= 0 && int(k) < len(valueKindStrings) { + return valueKindStrings[k] + } + return "" +} + +// StringValue returns a new [Value] for a string. +func StringValue(v string) Value { + return Value{ + num: uint64(len(v)), + any: stringptr(unsafe.StringData(v)), + } +} + +// IntValue returns a [Value] for an int. +func IntValue(v int) Value { return Int64Value(int64(v)) } + +// Int64Value returns a [Value] for an int64. +func Int64Value(v int64) Value { + return Value{ + num: uint64(v), // nolint: gosec // Store raw bytes. + any: ValueKindInt64, + } +} + +// Float64Value returns a [Value] for a float64. +func Float64Value(v float64) Value { + return Value{num: math.Float64bits(v), any: ValueKindFloat64} +} + +// BoolValue returns a [Value] for a bool. +func BoolValue(v bool) Value { //nolint:revive // Not a control flag. + var n uint64 + if v { + n = 1 + } + return Value{num: n, any: ValueKindBool} +} + +// BytesValue returns a [Value] for a byte slice. The passed slice must not be +// changed after it is passed. +func BytesValue(v []byte) Value { + return Value{ + num: uint64(len(v)), + any: bytesptr(unsafe.SliceData(v)), + } +} + +// SliceValue returns a [Value] for a slice of [Value]. The passed slice must +// not be changed after it is passed. 
+func SliceValue(vs ...Value) Value { + return Value{ + num: uint64(len(vs)), + any: sliceptr(unsafe.SliceData(vs)), + } +} + +// MapValue returns a new [Value] for a slice of key-value pairs. The passed +// slice must not be changed after it is passed. +func MapValue(kvs ...Attr) Value { + return Value{ + num: uint64(len(kvs)), + any: mapptr(unsafe.SliceData(kvs)), + } +} + +// AsString returns the value held by v as a string. +func (v Value) AsString() string { + if sp, ok := v.any.(stringptr); ok { + return unsafe.String(sp, v.num) + } + // TODO: error handle + return "" +} + +// asString returns the value held by v as a string. It will panic if the Value +// is not KindString. +func (v Value) asString() string { + return unsafe.String(v.any.(stringptr), v.num) +} + +// AsInt64 returns the value held by v as an int64. +func (v Value) AsInt64() int64 { + if v.Kind() != ValueKindInt64 { + // TODO: error handle + return 0 + } + return v.asInt64() +} + +// asInt64 returns the value held by v as an int64. If v is not of KindInt64, +// this will return garbage. +func (v Value) asInt64() int64 { + // Assumes v.num was a valid int64 (overflow not checked). + return int64(v.num) // nolint: gosec +} + +// AsBool returns the value held by v as a bool. +func (v Value) AsBool() bool { + if v.Kind() != ValueKindBool { + // TODO: error handle + return false + } + return v.asBool() +} + +// asBool returns the value held by v as a bool. If v is not of KindBool, this +// will return garbage. +func (v Value) asBool() bool { return v.num == 1 } + +// AsFloat64 returns the value held by v as a float64. +func (v Value) AsFloat64() float64 { + if v.Kind() != ValueKindFloat64 { + // TODO: error handle + return 0 + } + return v.asFloat64() +} + +// asFloat64 returns the value held by v as a float64. If v is not of +// KindFloat64, this will return garbage. +func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) } + +// AsBytes returns the value held by v as a []byte. +func (v Value) AsBytes() []byte { + if sp, ok := v.any.(bytesptr); ok { + return unsafe.Slice((*byte)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asBytes returns the value held by v as a []byte. It will panic if the Value +// is not KindBytes. +func (v Value) asBytes() []byte { + return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num) +} + +// AsSlice returns the value held by v as a []Value. +func (v Value) AsSlice() []Value { + if sp, ok := v.any.(sliceptr); ok { + return unsafe.Slice((*Value)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asSlice returns the value held by v as a []Value. It will panic if the Value +// is not KindSlice. +func (v Value) asSlice() []Value { + return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num) +} + +// AsMap returns the value held by v as a []Attr. +func (v Value) AsMap() []Attr { + if sp, ok := v.any.(mapptr); ok { + return unsafe.Slice((*Attr)(sp), v.num) + } + // TODO: error handle + return nil +} + +// asMap returns the value held by v as a []Attr. It will panic if the +// Value is not KindMap. +func (v Value) asMap() []Attr { + return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num) +} + +// Kind returns the Kind of v. +func (v Value) Kind() ValueKind { + switch x := v.any.(type) { + case ValueKind: + return x + case stringptr: + return ValueKindString + case bytesptr: + return ValueKindBytes + case sliceptr: + return ValueKindSlice + case mapptr: + return ValueKindMap + default: + return ValueKindEmpty + } +} + +// Empty returns if v does not hold any value. 
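Taken together, the constructors and accessors above give Value a small tagged-union API. A sketch of a round trip, written as if it sat inside this internal telemetry package (it is not importable from user code; the function name is illustrative):

package telemetry

import "fmt"

func exampleValueRoundTrip() {
	vals := []Value{
		StringValue("GET"),
		Int64Value(300),
		BoolValue(true),
		Float64Value(10.239),
	}
	for _, v := range vals {
		// Kind reports which variant is stored; the matching accessor reads it back.
		switch v.Kind() {
		case ValueKindString:
			fmt.Println("string:", v.AsString())
		case ValueKindInt64:
			fmt.Println("int64:", v.AsInt64())
		case ValueKindBool:
			fmt.Println("bool:", v.AsBool())
		case ValueKindFloat64:
			fmt.Println("float64:", v.AsFloat64())
		}
	}
}
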
+func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty } + +// Equal returns if v is equal to w. +func (v Value) Equal(w Value) bool { + k1 := v.Kind() + k2 := w.Kind() + if k1 != k2 { + return false + } + switch k1 { + case ValueKindInt64, ValueKindBool: + return v.num == w.num + case ValueKindString: + return v.asString() == w.asString() + case ValueKindFloat64: + return v.asFloat64() == w.asFloat64() + case ValueKindSlice: + return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal) + case ValueKindMap: + sv := sortMap(v.asMap()) + sw := sortMap(w.asMap()) + return slices.EqualFunc(sv, sw, Attr.Equal) + case ValueKindBytes: + return bytes.Equal(v.asBytes(), w.asBytes()) + case ValueKindEmpty: + return true + default: + // TODO: error handle + return false + } +} + +func sortMap(m []Attr) []Attr { + sm := make([]Attr, len(m)) + copy(sm, m) + slices.SortFunc(sm, func(a, b Attr) int { + return cmp.Compare(a.Key, b.Key) + }) + + return sm +} + +// String returns Value's value as a string, formatted like [fmt.Sprint]. +// +// The returned string is meant for debugging; +// the string representation is not stable. +func (v Value) String() string { + switch v.Kind() { + case ValueKindString: + return v.asString() + case ValueKindInt64: + // Assumes v.num was a valid int64 (overflow not checked). + return strconv.FormatInt(int64(v.num), 10) // nolint: gosec + case ValueKindFloat64: + return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) + case ValueKindBool: + return strconv.FormatBool(v.asBool()) + case ValueKindBytes: + return fmt.Sprint(v.asBytes()) + case ValueKindMap: + return fmt.Sprint(v.asMap()) + case ValueKindSlice: + return fmt.Sprint(v.asSlice()) + case ValueKindEmpty: + return "" + default: + // Try to handle this as gracefully as possible. + // + // Don't panic here. The goal here is to have developers find this + // first if a slog.Kind is is not handled. It is + // preferable to have user's open issue asking why their attributes + // have a "unhandled: " prefix than say that their code is panicking. + return fmt.Sprintf("", v.Kind()) + } +} + +// MarshalJSON encodes v into OTLP formatted JSON. +func (v *Value) MarshalJSON() ([]byte, error) { + switch v.Kind() { + case ValueKindString: + return json.Marshal(struct { + Value string `json:"stringValue"` + }{v.asString()}) + case ValueKindInt64: + return json.Marshal(struct { + Value string `json:"intValue"` + }{strconv.FormatInt(int64(v.num), 10)}) // nolint: gosec // From raw bytes. + case ValueKindFloat64: + return json.Marshal(struct { + Value float64 `json:"doubleValue"` + }{v.asFloat64()}) + case ValueKindBool: + return json.Marshal(struct { + Value bool `json:"boolValue"` + }{v.asBool()}) + case ValueKindBytes: + return json.Marshal(struct { + Value []byte `json:"bytesValue"` + }{v.asBytes()}) + case ValueKindMap: + return json.Marshal(struct { + Value struct { + Values []Attr `json:"values"` + } `json:"kvlistValue"` + }{struct { + Values []Attr `json:"values"` + }{v.asMap()}}) + case ValueKindSlice: + return json.Marshal(struct { + Value struct { + Values []Value `json:"values"` + } `json:"arrayValue"` + }{struct { + Values []Value `json:"values"` + }{v.asSlice()}}) + case ValueKindEmpty: + return nil, nil + default: + return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String()) + } +} + +// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v. 
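MarshalJSON above maps each kind onto the OTLP AnyValue JSON shape, with int64 values rendered as decimal strings (matching protojson). A sketch of the expected output, again written as if it lived inside the internal package:

package telemetry

import (
	"encoding/json"
	"fmt"
)

func exampleValueJSON() {
	for _, v := range []Value{StringValue("ok"), Int64Value(42), BoolValue(true)} {
		b, err := json.Marshal(&v) // MarshalJSON has a pointer receiver
		if err != nil {
			panic(err)
		}
		fmt.Println(string(b))
	}
	// Expected, per MarshalJSON above:
	//   {"stringValue":"ok"}
	//   {"intValue":"42"}
	//   {"boolValue":true}
}
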
+func (v *Value) UnmarshalJSON(data []byte) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + + t, err := decoder.Token() + if err != nil { + return err + } + if t != json.Delim('{') { + return errors.New("invalid Value type") + } + + for decoder.More() { + keyIface, err := decoder.Token() + if err != nil { + if errors.Is(err, io.EOF) { + // Empty. + return nil + } + return err + } + + key, ok := keyIface.(string) + if !ok { + return fmt.Errorf("invalid Value key: %#v", keyIface) + } + + switch key { + case "stringValue", "string_value": + var val string + err = decoder.Decode(&val) + *v = StringValue(val) + case "boolValue", "bool_value": + var val bool + err = decoder.Decode(&val) + *v = BoolValue(val) + case "intValue", "int_value": + var val protoInt64 + err = decoder.Decode(&val) + *v = Int64Value(val.Int64()) + case "doubleValue", "double_value": + var val float64 + err = decoder.Decode(&val) + *v = Float64Value(val) + case "bytesValue", "bytes_value": + var val64 string + if err := decoder.Decode(&val64); err != nil { + return err + } + var val []byte + val, err = base64.StdEncoding.DecodeString(val64) + *v = BytesValue(val) + case "arrayValue", "array_value": + var val struct{ Values []Value } + err = decoder.Decode(&val) + *v = SliceValue(val.Values...) + case "kvlistValue", "kvlist_value": + var val struct{ Values []Attr } + err = decoder.Decode(&val) + *v = MapValue(val.Values...) + default: + // Skip unknown. + continue + } + // Use first valid. Ignore the rest. + return err + } + + // Only unknown fields. Return nil without unmarshaling any value. + return nil +} diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/noop.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/noop.go index ca20e9997..c8b1ae5d6 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/noop.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -82,4 +82,22 @@ func (noopSpan) AddLink(Link) {} func (noopSpan) SetName(string) {} // TracerProvider returns a no-op TracerProvider. -func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } +func (s noopSpan) TracerProvider() TracerProvider { + return s.tracerProvider(autoInstEnabled) +} + +// autoInstEnabled defines if the auto-instrumentation SDK is enabled. +// +// The auto-instrumentation is expected to overwrite this value to true when it +// attaches to the process. +var autoInstEnabled = new(bool) + +// tracerProvider return a noopTracerProvider if autoEnabled is false, +// otherwise it will return a TracerProvider from the sdk package used in +// auto-instrumentation. +func (noopSpan) tracerProvider(autoEnabled *bool) TracerProvider { + if *autoEnabled { + return newAutoTracerProvider() + } + return noopTracerProvider{} +} diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/version.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/version.go index fb7d12673..d5fa71f67 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/version.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/version.go @@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel" // Version is the current release version of OpenTelemetry in use. 
func Version() string { - return "1.33.0" + return "1.35.0" } diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/versions.yaml b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/versions.yaml index 9f878cd1f..2b4cb4b41 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/versions.yaml +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/otel/versions.yaml @@ -3,7 +3,7 @@ module-sets: stable-v1: - version: v1.33.0 + version: v1.35.0 modules: - go.opentelemetry.io/otel - go.opentelemetry.io/otel/bridge/opencensus @@ -23,11 +23,11 @@ module-sets: - go.opentelemetry.io/otel/sdk/metric - go.opentelemetry.io/otel/trace experimental-metrics: - version: v0.55.0 + version: v0.57.0 modules: - go.opentelemetry.io/otel/exporters/prometheus experimental-logs: - version: v0.9.0 + version: v0.11.0 modules: - go.opentelemetry.io/otel/log - go.opentelemetry.io/otel/sdk/log @@ -40,3 +40,4 @@ module-sets: - go.opentelemetry.io/otel/schema excluded-modules: - go.opentelemetry.io/otel/internal/tools + - go.opentelemetry.io/otel/trace/internal/telemetry/test diff --git a/src/cmd/linuxkit/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/src/cmd/linuxkit/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go index d7099c35b..b342a0a94 100644 --- a/src/cmd/linuxkit/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go +++ b/src/cmd/linuxkit/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -311,7 +311,8 @@ type ResourceSpans struct { // A list of ScopeSpans that originate from a resource. ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the resource data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "scope_spans" field which have their own schema_url field. @@ -384,7 +385,8 @@ type ScopeSpans struct { // A list of Spans that originate from an instrumentation scope. Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` // The Schema URL, if known. This is the identifier of the Schema that the span data - // is recorded in. To learn more about Schema URL see + // is recorded in. Notably, the last part of the URL path is the version number of the + // schema: http[s]://server[:port]/path/. To learn more about Schema URL see // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url // This schema_url applies to all spans and span events in the "spans" field. SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` diff --git a/src/cmd/linuxkit/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/src/cmd/linuxkit/vendor/golang.org/x/crypto/cryptobyte/asn1.go index 2492f796a..d25979d9f 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/crypto/cryptobyte/asn1.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -234,7 +234,7 @@ func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { // Identifiers with the low five bits set indicate high-tag-number format // (two or more octets), which we don't support. 
if tag&0x1f == 0x1f { - b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag) + b.err = fmt.Errorf("cryptobyte: high-tag number identifier octets not supported: 0x%x", tag) return } b.AddUint8(uint8(tag)) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/src/cmd/linuxkit/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go index bd896bdc7..8d99551fe 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build (!amd64 && !ppc64le && !ppc64 && !s390x) || !gc || purego +//go:build (!amd64 && !loong64 && !ppc64le && !ppc64 && !s390x) || !gc || purego package poly1305 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go b/src/cmd/linuxkit/vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go similarity index 94% rename from src/cmd/linuxkit/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go rename to src/cmd/linuxkit/vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go index 164cd47d3..315b84ac3 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build gc && !purego +//go:build gc && !purego && (amd64 || loong64 || ppc64 || ppc64le) package poly1305 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s b/src/cmd/linuxkit/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s new file mode 100644 index 000000000..bc8361da4 --- /dev/null +++ b/src/cmd/linuxkit/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s @@ -0,0 +1,123 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build gc && !purego + +// func update(state *macState, msg []byte) +TEXT ·update(SB), $0-32 + MOVV state+0(FP), R4 + MOVV msg_base+8(FP), R5 + MOVV msg_len+16(FP), R6 + + MOVV $0x10, R7 + + MOVV (R4), R8 // h0 + MOVV 8(R4), R9 // h1 + MOVV 16(R4), R10 // h2 + MOVV 24(R4), R11 // r0 + MOVV 32(R4), R12 // r1 + + BLT R6, R7, bytes_between_0_and_15 + +loop: + MOVV (R5), R14 // msg[0:8] + MOVV 8(R5), R16 // msg[8:16] + ADDV R14, R8, R8 // h0 (x1 + y1 = z1', if z1' < x1 then z1' overflow) + ADDV R16, R9, R27 + SGTU R14, R8, R24 // h0.carry + SGTU R9, R27, R28 + ADDV R27, R24, R9 // h1 + SGTU R27, R9, R24 + OR R24, R28, R24 // h1.carry + ADDV $0x01, R24, R24 + ADDV R10, R24, R10 // h2 + + ADDV $16, R5, R5 // msg = msg[16:] + +multiply: + MULV R8, R11, R14 // h0r0.lo + MULHVU R8, R11, R15 // h0r0.hi + MULV R9, R11, R13 // h1r0.lo + MULHVU R9, R11, R16 // h1r0.hi + ADDV R13, R15, R15 + SGTU R13, R15, R24 + ADDV R24, R16, R16 + MULV R10, R11, R25 + ADDV R16, R25, R25 + MULV R8, R12, R13 // h0r1.lo + MULHVU R8, R12, R16 // h0r1.hi + ADDV R13, R15, R15 + SGTU R13, R15, R24 + ADDV R24, R16, R16 + MOVV R16, R8 + MULV R10, R12, R26 // h2r1 + MULV R9, R12, R13 // h1r1.lo + MULHVU R9, R12, R16 // h1r1.hi + ADDV R13, R25, R25 + ADDV R16, R26, R27 + SGTU R13, R25, R24 + ADDV R27, R24, R26 + ADDV R8, R25, R25 + SGTU R8, R25, R24 + ADDV R24, R26, R26 + AND $3, R25, R10 + AND $-4, R25, R17 + ADDV R17, R14, R8 + ADDV R26, R15, R27 + SGTU R17, R8, R24 + SGTU R26, R27, R28 + ADDV R27, R24, R9 + SGTU R27, R9, R24 + OR R24, R28, R24 + ADDV R24, R10, R10 + SLLV $62, R26, R27 + SRLV $2, R25, R28 + SRLV $2, R26, R26 + OR R27, R28, R25 + ADDV R25, R8, R8 + ADDV R26, R9, R27 + SGTU R25, R8, R24 + SGTU R26, R27, R28 + ADDV R27, R24, R9 + SGTU R27, R9, R24 + OR R24, R28, R24 + ADDV R24, R10, R10 + + SUBV $16, R6, R6 + BGE R6, R7, loop + +bytes_between_0_and_15: + BEQ R6, R0, done + MOVV $1, R14 + XOR R15, R15 + ADDV R6, R5, R5 + +flush_buffer: + MOVBU -1(R5), R25 + SRLV $56, R14, R24 + SLLV $8, R15, R28 + SLLV $8, R14, R14 + OR R24, R28, R15 + XOR R25, R14, R14 + SUBV $1, R6, R6 + SUBV $1, R5, R5 + BNE R6, R0, flush_buffer + + ADDV R14, R8, R8 + SGTU R14, R8, R24 + ADDV R15, R9, R27 + SGTU R15, R27, R28 + ADDV R27, R24, R9 + SGTU R27, R9, R24 + OR R24, R28, R24 + ADDV R10, R24, R10 + + MOVV $16, R6 + JMP multiply + +done: + MOVV R8, (R4) + MOVV R9, 8(R4) + MOVV R10, 16(R4) + RET diff --git a/src/cmd/linuxkit/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go b/src/cmd/linuxkit/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go deleted file mode 100644 index 1a1679aaa..000000000 --- a/src/cmd/linuxkit/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego && (ppc64 || ppc64le) - -package poly1305 - -//go:noescape -func update(state *macState, msg []byte) - -// mac is a wrapper for macGeneric that redirects calls that would have gone to -// updateGeneric to update. -// -// Its Write and Sum methods are otherwise identical to the macGeneric ones, but -// using function pointers would carry a major performance cost. 
-type mac struct{ macGeneric } - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < TagSize { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - update(&h.macState, h.buffer[:]) - } - if n := len(p) - (len(p) % TagSize); n > 0 { - update(&h.macState, p[:n]) - p = p[n:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return nn, nil -} - -func (h *mac) Sum(out *[16]byte) { - state := h.macState - if h.offset > 0 { - update(&state, h.buffer[:h.offset]) - } - finalize(out, &state.h, &state.s) -} diff --git a/src/cmd/linuxkit/vendor/golang.org/x/crypto/pkcs12/crypto.go b/src/cmd/linuxkit/vendor/golang.org/x/crypto/pkcs12/crypto.go index 96f4a1a56..212538cb5 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/crypto/pkcs12/crypto.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/crypto/pkcs12/crypto.go @@ -26,7 +26,7 @@ type pbeCipher interface { create(key []byte) (cipher.Block, error) // deriveKey returns a key derived from the given password and salt. deriveKey(salt, password []byte, iterations int) []byte - // deriveKey returns an IV derived from the given password and salt. + // deriveIV returns an IV derived from the given password and salt. deriveIV(salt, password []byte, iterations int) []byte } diff --git a/src/cmd/linuxkit/vendor/golang.org/x/crypto/ssh/handshake.go b/src/cmd/linuxkit/vendor/golang.org/x/crypto/ssh/handshake.go index 56cdc7c21..b6bf546b4 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/crypto/ssh/handshake.go @@ -5,7 +5,6 @@ package ssh import ( - "crypto/rand" "errors" "fmt" "io" @@ -25,6 +24,11 @@ const debugHandshake = false // quickly. const chanSize = 16 +// maxPendingPackets sets the maximum number of packets to queue while waiting +// for KEX to complete. This limits the total pending data to maxPendingPackets +// * maxPacket bytes, which is ~16.8MB. +const maxPendingPackets = 64 + // keyingTransport is a packet based transport that supports key // changes. It need not be thread-safe. It should pass through // msgNewKeys in both directions. @@ -73,13 +77,22 @@ type handshakeTransport struct { incoming chan []byte readError error - mu sync.Mutex - writeError error - sentInitPacket []byte - sentInitMsg *kexInitMsg - pendingPackets [][]byte // Used when a key exchange is in progress. + mu sync.Mutex + // Condition for the above mutex. It is used to notify a completed key + // exchange or a write failure. Writes can wait for this condition while a + // key exchange is in progress. + writeCond *sync.Cond + writeError error + sentInitPacket []byte + sentInitMsg *kexInitMsg + // Used to queue writes when a key exchange is in progress. The length is + // limited by pendingPacketsSize. Once full, writes will block until the key + // exchange is completed or an error occurs. If not empty, it is emptied + // all at once when the key exchange is completed in kexLoop. 
+ pendingPackets [][]byte writePacketsLeft uint32 writeBytesLeft int64 + userAuthComplete bool // whether the user authentication phase is complete // If the read loop wants to schedule a kex, it pings this // channel, and the write loop will send out a kex @@ -133,6 +146,7 @@ func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, config: config, } + t.writeCond = sync.NewCond(&t.mu) t.resetReadThresholds() t.resetWriteThresholds() @@ -259,6 +273,7 @@ func (t *handshakeTransport) recordWriteError(err error) { defer t.mu.Unlock() if t.writeError == nil && err != nil { t.writeError = err + t.writeCond.Broadcast() } } @@ -362,6 +377,8 @@ write: } } t.pendingPackets = t.pendingPackets[:0] + // Unblock writePacket if waiting for KEX. + t.writeCond.Broadcast() t.mu.Unlock() } @@ -483,7 +500,7 @@ func (t *handshakeTransport) sendKexInit() error { CompressionClientServer: supportedCompressions, CompressionServerClient: supportedCompressions, } - io.ReadFull(rand.Reader, msg.Cookie[:]) + io.ReadFull(t.config.Rand, msg.Cookie[:]) // We mutate the KexAlgos slice, in order to add the kex-strict extension algorithm, // and possibly to add the ext-info extension algorithm. Since the slice may be the @@ -552,26 +569,44 @@ func (t *handshakeTransport) sendKexInit() error { return nil } +var errSendBannerPhase = errors.New("ssh: SendAuthBanner outside of authentication phase") + func (t *handshakeTransport) writePacket(p []byte) error { + t.mu.Lock() + defer t.mu.Unlock() + switch p[0] { case msgKexInit: return errors.New("ssh: only handshakeTransport can send kexInit") case msgNewKeys: return errors.New("ssh: only handshakeTransport can send newKeys") + case msgUserAuthBanner: + if t.userAuthComplete { + return errSendBannerPhase + } + case msgUserAuthSuccess: + t.userAuthComplete = true } - t.mu.Lock() - defer t.mu.Unlock() if t.writeError != nil { return t.writeError } if t.sentInitMsg != nil { - // Copy the packet so the writer can reuse the buffer. - cp := make([]byte, len(p)) - copy(cp, p) - t.pendingPackets = append(t.pendingPackets, cp) - return nil + if len(t.pendingPackets) < maxPendingPackets { + // Copy the packet so the writer can reuse the buffer. + cp := make([]byte, len(p)) + copy(cp, p) + t.pendingPackets = append(t.pendingPackets, cp) + return nil + } + for t.sentInitMsg != nil { + // Block and wait for KEX to complete or an error. 
+ t.writeCond.Wait() + if t.writeError != nil { + return t.writeError + } + } } if t.writeBytesLeft > 0 { @@ -588,6 +623,7 @@ func (t *handshakeTransport) writePacket(p []byte) error { if err := t.pushPacket(p); err != nil { t.writeError = err + t.writeCond.Broadcast() } return nil diff --git a/src/cmd/linuxkit/vendor/golang.org/x/crypto/ssh/messages.go b/src/cmd/linuxkit/vendor/golang.org/x/crypto/ssh/messages.go index b55f86056..118427bc0 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/crypto/ssh/messages.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/crypto/ssh/messages.go @@ -818,6 +818,8 @@ func decode(packet []byte) (interface{}, error) { return new(userAuthSuccessMsg), nil case msgUserAuthFailure: msg = new(userAuthFailureMsg) + case msgUserAuthBanner: + msg = new(userAuthBannerMsg) case msgUserAuthPubKeyOk: msg = new(userAuthPubKeyOkMsg) case msgGlobalRequest: diff --git a/src/cmd/linuxkit/vendor/golang.org/x/crypto/ssh/server.go b/src/cmd/linuxkit/vendor/golang.org/x/crypto/ssh/server.go index 5b5ccd96f..1839ddc6a 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/crypto/ssh/server.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/crypto/ssh/server.go @@ -59,6 +59,27 @@ type GSSAPIWithMICConfig struct { Server GSSAPIServer } +// SendAuthBanner implements [ServerPreAuthConn]. +func (s *connection) SendAuthBanner(msg string) error { + return s.transport.writePacket(Marshal(&userAuthBannerMsg{ + Message: msg, + })) +} + +func (*connection) unexportedMethodForFutureProofing() {} + +// ServerPreAuthConn is the interface available on an incoming server +// connection before authentication has completed. +type ServerPreAuthConn interface { + unexportedMethodForFutureProofing() // permits growing ServerPreAuthConn safely later, ala testing.TB + + ConnMetadata + + // SendAuthBanner sends a banner message to the client. + // It returns an error once the authentication phase has ended. + SendAuthBanner(string) error +} + // ServerConfig holds server specific configuration data. type ServerConfig struct { // Config contains configuration shared between client and server. @@ -118,6 +139,12 @@ type ServerConfig struct { // attempts. AuthLogCallback func(conn ConnMetadata, method string, err error) + // PreAuthConnCallback, if non-nil, is called upon receiving a new connection + // before any authentication has started. The provided ServerPreAuthConn + // can be used at any time before authentication is complete, including + // after this callback has returned. + PreAuthConnCallback func(ServerPreAuthConn) + // ServerVersion is the version identification string to announce in // the public handshake. // If empty, a reasonable default is used. @@ -488,6 +515,10 @@ func (b *BannerError) Error() string { } func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { + if config.PreAuthConnCallback != nil { + config.PreAuthConnCallback(s) + } + sessionID := s.transport.getSessionID() var cache pubKeyCache var perms *Permissions @@ -495,7 +526,7 @@ func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, err authFailures := 0 noneAuthCount := 0 var authErrs []error - var displayedBanner bool + var calledBannerCallback bool partialSuccessReturned := false // Set the initial authentication callbacks from the config. They can be // changed if a PartialSuccessError is returned. 
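The new PreAuthConnCallback hook and ServerPreAuthConn.SendAuthBanner shown above let a server push banner messages as soon as a connection arrives, independent of BannerCallback. A minimal sketch of wiring them into a ServerConfig; the package name, helper name, and logging are illustrative, and host-key generation and listening are elided:

package sshbanner

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func newServerConfig(hostKey ssh.Signer) *ssh.ServerConfig {
	cfg := &ssh.ServerConfig{
		// Invoked for each incoming connection before any auth request.
		PreAuthConnCallback: func(conn ssh.ServerPreAuthConn) {
			log.Printf("pre-auth connection from %s", conn.RemoteAddr())
			// Banners may be sent at any time until authentication completes;
			// afterwards SendAuthBanner returns an error.
			if err := conn.SendAuthBanner("authorized use only\n"); err != nil {
				log.Printf("banner: %v", err)
			}
		},
		NoClientAuth: true, // for brevity; a real server would configure auth callbacks
	}
	cfg.AddHostKey(hostKey)
	return cfg
}
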
@@ -542,14 +573,10 @@ userAuthLoop: s.user = userAuthReq.User - if !displayedBanner && config.BannerCallback != nil { - displayedBanner = true - msg := config.BannerCallback(s) - if msg != "" { - bannerMsg := &userAuthBannerMsg{ - Message: msg, - } - if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { + if !calledBannerCallback && config.BannerCallback != nil { + calledBannerCallback = true + if msg := config.BannerCallback(s); msg != "" { + if err := s.SendAuthBanner(msg); err != nil { return nil, err } } @@ -762,10 +789,7 @@ userAuthLoop: var bannerErr *BannerError if errors.As(authErr, &bannerErr) { if bannerErr.Message != "" { - bannerMsg := &userAuthBannerMsg{ - Message: bannerErr.Message, - } - if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { + if err := s.SendAuthBanner(bannerErr.Message); err != nil { return nil, err } } diff --git a/src/cmd/linuxkit/vendor/golang.org/x/crypto/ssh/tcpip.go b/src/cmd/linuxkit/vendor/golang.org/x/crypto/ssh/tcpip.go index ef5059a11..93d844f03 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/crypto/ssh/tcpip.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/crypto/ssh/tcpip.go @@ -459,7 +459,7 @@ func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel return nil, err } go DiscardRequests(in) - return ch, err + return ch, nil } type tcpChan struct { diff --git a/src/cmd/linuxkit/vendor/golang.org/x/net/context/context.go b/src/cmd/linuxkit/vendor/golang.org/x/net/context/context.go index cf66309c4..db1c95fab 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/net/context/context.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/net/context/context.go @@ -3,29 +3,31 @@ // license that can be found in the LICENSE file. // Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries +// cancellation signals, and other request-scoped values across API boundaries // and between processes. // As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. +// name [context], and migrating to it can be done automatically with [go fix]. // -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// Incoming requests to a server should create a [Context], and outgoing +// calls to servers should accept a Context. The chain of function +// calls between them must propagate the Context, optionally replacing +// it with a derived Context created using [WithCancel], [WithDeadline], +// [WithTimeout], or [WithValue]. // // Programs that use Contexts should follow these rules to keep interfaces // consistent across packages and enable static analysis tools to check context // propagation: // // Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first +// explicitly to each function that needs it. This is discussed further in +// https://go.dev/blog/context-and-structs. The Context should be the first // parameter, typically named ctx: // // func DoSomething(ctx context.Context, arg Arg) error { // // ... use ctx ... // } // -// Do not pass a nil Context, even if a function permits it. 
Pass context.TODO +// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO] // if you are unsure about which Context to use. // // Use context Values only for request-scoped data that transits processes and @@ -34,9 +36,30 @@ // The same Context may be passed to functions running in different goroutines; // Contexts are safe for simultaneous use by multiple goroutines. // -// See http://blog.golang.org/context for example code for a server that uses +// See https://go.dev/blog/context for example code for a server that uses // Contexts. -package context // import "golang.org/x/net/context" +// +// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +// A Context carries a deadline, a cancellation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// Canceled is the error returned by [Context.Err] when the context is canceled +// for some reason other than its deadline passing. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled +// due to its deadline passing. +var DeadlineExceeded = context.DeadlineExceeded // Background returns a non-nil, empty Context. It is never canceled, has no // values, and has no deadline. It is typically used by the main function, @@ -49,8 +72,73 @@ func Background() Context { // TODO returns a non-nil, empty Context. Code should use context.TODO when // it's unclear which Context to use or it is not yet available (because the // surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. +// parameter). func TODO() Context { return todo } + +var ( + background = context.Background() + todo = context.TODO() +) + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// A CancelFunc may be called by multiple goroutines simultaneously. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc + +// WithCancel returns a derived context that points to the parent context +// but has a new Done channel. The returned context's Done channel is closed +// when the returned cancel function is called or when the parent context's +// Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + return context.WithCancel(parent) +} + +// WithDeadline returns a derived context that points to the parent context +// but has the deadline adjusted to be no later than d. If the parent's +// deadline is already earlier than d, WithDeadline(parent, d) is semantically +// equivalent to parent. The returned [Context.Done] channel is closed when +// the deadline expires, when the returned cancel function is called, +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. 
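With this rewrite, golang.org/x/net/context becomes a thin set of aliases over the standard library, so values from the two packages are interchangeable without conversion. A small sketch:

package main

import (
	stdcontext "context"
	"fmt"

	xcontext "golang.org/x/net/context"
)

func main() {
	// xcontext.Context is a type alias for the standard context.Context,
	// so a derived context satisfies both packages' signatures directly.
	ctx, cancel := xcontext.WithCancel(stdcontext.Background())
	defer cancel()

	var std stdcontext.Context = ctx // no conversion needed
	fmt.Println(std.Err() == nil)    // true until cancel is called
}
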
+func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) { + return context.WithDeadline(parent, d) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return context.WithTimeout(parent, timeout) +} + +// WithValue returns a derived context that points to the parent Context. +// In the derived context, the value associated with key is val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The provided key must be comparable and should not be of type +// string or any other built-in type to avoid collisions between +// packages using context. Users of WithValue should define their own +// types for keys. To avoid allocating when assigning to an +// interface{}, context keys often have concrete type +// struct{}. Alternatively, exported context key variables' static +// type should be a pointer or interface. +func WithValue(parent Context, key, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/src/cmd/linuxkit/vendor/golang.org/x/net/context/go17.go b/src/cmd/linuxkit/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 0c1b86793..000000000 --- a/src/cmd/linuxkit/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/src/cmd/linuxkit/vendor/golang.org/x/net/context/go19.go b/src/cmd/linuxkit/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index e31e35a90..000000000 --- a/src/cmd/linuxkit/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/src/cmd/linuxkit/vendor/golang.org/x/net/context/pre_go17.go b/src/cmd/linuxkit/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 065ff3dfa..000000000 --- a/src/cmd/linuxkit/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. 
-type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. 
- - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
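The pre-go1.7 implementation being deleted here maintained the parent/child cancellation graph by hand: propagateCancel registered children, and cancelCtx.cancel closed the done channel and walked them. A short sketch of the observable behavior, using the standard library that now backs this package (the one-hour timeout is an arbitrary placeholder):

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	parent, cancel := context.WithCancel(context.Background())
	child, childCancel := context.WithTimeout(parent, time.Hour)
	defer childCancel()

	cancel() // canceling the parent cancels every derived context

	<-child.Done()           // the child observes the cancellation
	fmt.Println(child.Err()) // context canceled
}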
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/src/cmd/linuxkit/vendor/golang.org/x/net/context/pre_go19.go b/src/cmd/linuxkit/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index ec5a63803..000000000 --- a/src/cmd/linuxkit/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. 
No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/src/cmd/linuxkit/vendor/golang.org/x/net/http2/config.go b/src/cmd/linuxkit/vendor/golang.org/x/net/http2/config.go index de58dfb8d..ca645d9a1 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/net/http2/config.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/net/http2/config.go @@ -60,7 +60,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { return conf } -// configFromServer merges configuration settings from h2 and h2.t1.HTTP2 +// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2 // (the net/http Transport). func configFromTransport(h2 *Transport) http2Config { conf := http2Config{ diff --git a/src/cmd/linuxkit/vendor/golang.org/x/net/http2/config_go124.go b/src/cmd/linuxkit/vendor/golang.org/x/net/http2/config_go124.go index e3784123c..5b516c55f 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/net/http2/config_go124.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/net/http2/config_go124.go @@ -13,7 +13,7 @@ func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { fillNetHTTPConfig(conf, srv.HTTP2) } -// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2. +// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. 
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { fillNetHTTPConfig(conf, tr.HTTP2) } diff --git a/src/cmd/linuxkit/vendor/golang.org/x/net/http2/frame.go b/src/cmd/linuxkit/vendor/golang.org/x/net/http2/frame.go index 81faec7e7..97bd8b06f 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/net/http2/frame.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/net/http2/frame.go @@ -225,6 +225,11 @@ var fhBytes = sync.Pool{ }, } +func invalidHTTP1LookingFrameHeader() FrameHeader { + fh, _ := readFrameHeader(make([]byte, frameHeaderLen), strings.NewReader("HTTP/1.1 ")) + return fh +} + // ReadFrameHeader reads 9 bytes from r and returns a FrameHeader. // Most users should use Framer.ReadFrame instead. func ReadFrameHeader(r io.Reader) (FrameHeader, error) { @@ -503,10 +508,16 @@ func (fr *Framer) ReadFrame() (Frame, error) { return nil, err } if fh.Length > fr.maxReadSize { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, ErrFrameTooLarge } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, err } f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/net/http2/http2.go b/src/cmd/linuxkit/vendor/golang.org/x/net/http2/http2.go index c7601c909..6c18ea230 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/net/http2/http2.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/net/http2/http2.go @@ -34,11 +34,19 @@ import ( ) var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool - disableExtendedConnectProtocol bool + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool + + // Enabling extended CONNECT by causes browsers to attempt to use + // WebSockets-over-HTTP/2. This results in problems when the server's websocket + // package doesn't support extended CONNECT. + // + // Disable extended CONNECT by default for now. + // + // Issue #71128. + disableExtendedConnectProtocol = true ) func init() { @@ -51,8 +59,8 @@ func init() { logFrameWrites = true logFrameReads = true } - if strings.Contains(e, "http2xconnect=0") { - disableExtendedConnectProtocol = true + if strings.Contains(e, "http2xconnect=1") { + disableExtendedConnectProtocol = false } } @@ -407,23 +415,6 @@ func (s *sorter) SortStrings(ss []string) { s.v = save } -// validPseudoPath reports whether v is a valid :path pseudo-header -// value. It must be either: -// -// - a non-empty string starting with '/' -// - the string '*', for OPTIONS requests. -// -// For now this is only used a quick check for deciding when to clean -// up Opaque URLs before sending requests from the Transport. -// See golang.org/issue/16847 -// -// We used to enforce that the path also didn't start with "//", but -// Google's GFE accepts such paths and Chrome sends them, so ignore -// that part of the spec. See golang.org/issue/19103. -func validPseudoPath(v string) bool { - return (len(v) > 0 && v[0] == '/') || v == "*" -} - // incomparable is a zero-width, non-comparable type. Adding it to a struct // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). 
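Two behavior changes sit in the hunks above: extended CONNECT is now disabled by default (GODEBUG=http2xconnect=1 turns it back on), and the Framer reports a clearer error when the bytes it just read look like the start of an HTTP/1.1 response instead of an HTTP/2 frame. The following standalone sketch illustrates only the second idea; it defines its own 9-octet header parser rather than using the package's internal types:

package main

import (
	"encoding/binary"
	"fmt"
)

// frameHeader mirrors the HTTP/2 frame header wire layout (RFC 9113,
// Section 4.1): 24-bit length, 8-bit type, 8-bit flags, 31-bit stream ID.
type frameHeader struct {
	Length   uint32
	Type     uint8
	Flags    uint8
	StreamID uint32
}

func parseFrameHeader(buf [9]byte) frameHeader {
	return frameHeader{
		Length:   uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2]),
		Type:     buf[3],
		Flags:    buf[4],
		StreamID: binary.BigEndian.Uint32(buf[5:9]) & (1<<31 - 1),
	}
}

func main() {
	// The sentinel: the first nine bytes of an HTTP/1.1 status line,
	// parsed as if they were a frame header.
	var http1 [9]byte
	copy(http1[:], "HTTP/1.1 ")
	sentinel := parseFrameHeader(http1)

	// A peer that answered "HTTP/1.1 400 Bad Request" instead of speaking
	// HTTP/2 produces exactly this header.
	var incoming [9]byte
	copy(incoming[:], "HTTP/1.1 400 Bad Request")
	fmt.Println(parseFrameHeader(incoming) == sentinel) // true
}

In the patch the comparison only happens on error paths (an oversized frame or a failed payload read), so well-formed frames pay no extra cost.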
diff --git a/src/cmd/linuxkit/vendor/golang.org/x/net/http2/server.go b/src/cmd/linuxkit/vendor/golang.org/x/net/http2/server.go index b55547aec..51fca38f6 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/net/http2/server.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/net/http2/server.go @@ -50,6 +50,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" + "golang.org/x/net/internal/httpcommon" ) const ( @@ -812,8 +813,7 @@ const maxCachedCanonicalHeadersKeysSize = 2048 func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() - buildCommonHeaderMapsOnce() - cv, ok := commonCanonHeader[v] + cv, ok := httpcommon.CachedCanonicalHeader(v) if ok { return cv } @@ -1068,7 +1068,10 @@ func (sc *serverConn) serve(conf http2Config) { func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { if sc.pingSent { - sc.vlogf("timeout waiting for PING response") + sc.logf("timeout waiting for PING response") + if f := sc.countErrorFunc; f != nil { + f("conn_close_lost_ping") + } sc.conn.Close() return } @@ -2233,25 +2236,25 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { sc.serveG.check() - rp := requestParam{ - method: f.PseudoValue("method"), - scheme: f.PseudoValue("scheme"), - authority: f.PseudoValue("authority"), - path: f.PseudoValue("path"), - protocol: f.PseudoValue("protocol"), + rp := httpcommon.ServerRequestParam{ + Method: f.PseudoValue("method"), + Scheme: f.PseudoValue("scheme"), + Authority: f.PseudoValue("authority"), + Path: f.PseudoValue("path"), + Protocol: f.PseudoValue("protocol"), } // extended connect is disabled, so we should not see :protocol - if disableExtendedConnectProtocol && rp.protocol != "" { + if disableExtendedConnectProtocol && rp.Protocol != "" { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - isConnect := rp.method == "CONNECT" + isConnect := rp.Method == "CONNECT" if isConnect { - if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") { + if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") { + } else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // Malformed requests or responses that are detected @@ -2265,15 +2268,16 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol)) } - rp.header = make(http.Header) + header := make(http.Header) + rp.Header = header for _, hf := range f.RegularFields() { - rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + header.Add(sc.canonicalHeader(hf.Name), hf.Value) } - if rp.authority == "" { - rp.authority = rp.header.Get("Host") + if rp.Authority == "" { + rp.Authority = header.Get("Host") } - if rp.protocol != "" { - rp.header.Set(":protocol", rp.protocol) + if rp.Protocol != "" { + header.Set(":protocol", rp.Protocol) } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) @@ -2282,7 +2286,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res } bodyOpen := !f.StreamEnded() if bodyOpen { - if vv, ok := 
rp.header["Content-Length"]; ok { + if vv, ok := rp.Header["Content-Length"]; ok { if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil { req.ContentLength = int64(cl) } else { @@ -2298,84 +2302,38 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return rw, req, nil } -type requestParam struct { - method string - scheme, authority, path string - protocol string - header http.Header -} - -func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) { sc.serveG.check() var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { + if rp.Scheme == "https" { tlsState = sc.tlsState } - needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue") - if needsContinue { - rp.header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. - if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range rp.header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(textproto.TrimString(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. - default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(rp.header, "Trailer") - - var url_ *url.URL - var requestURI string - if rp.method == "CONNECT" && rp.protocol == "" { - url_ = &url.URL{Host: rp.authority} - requestURI = rp.authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(rp.path) - if err != nil { - return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol)) - } - requestURI = rp.path + res := httpcommon.NewServerRequest(rp) + if res.InvalidReason != "" { + return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol)) } body := &requestBody{ conn: sc, stream: st, - needsContinue: needsContinue, + needsContinue: res.NeedsContinue, } - req := &http.Request{ - Method: rp.method, - URL: url_, + req := (&http.Request{ + Method: rp.Method, + URL: res.URL, RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: requestURI, + Header: rp.Header, + RequestURI: res.RequestURI, Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0, TLS: tlsState, - Host: rp.authority, + Host: rp.Authority, Body: body, - Trailer: trailer, - } - req = req.WithContext(st.ctx) - + Trailer: res.Trailer, + }).WithContext(st.ctx) rw := sc.newResponseWriter(st, req) return rw, req, nil } @@ -3270,12 +3228,12 @@ func (sc *serverConn) startPush(msg *startPushRequest) { // we start in "half closed (remote)" for simplicity. // See further comments at the definition of stateHalfClosedRemote. 
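In the refactored server path above, the pseudo-header values are packed into an httpcommon.ServerRequestParam, and URL and RequestURI construction moves into httpcommon.NewServerRequest. A rough sketch of that mapping; buildRequestTarget is a hypothetical helper invented for this illustration, not part of the patch:

package main

import (
	"fmt"
	"net/url"
)

// buildRequestTarget shows roughly how :method, :authority and :path become
// the URL and RequestURI of the server-side http.Request.
func buildRequestTarget(method, authority, path string) (*url.URL, string, error) {
	if method == "CONNECT" {
		// A plain CONNECT has no :path; mimic HTTP/1 server behavior.
		return &url.URL{Host: authority}, authority, nil
	}
	u, err := url.ParseRequestURI(path)
	if err != nil {
		return nil, "", err
	}
	return u, path, nil
}

func main() {
	u, requestURI, _ := buildRequestTarget("GET", "example.com", "/search?q=linuxkit")
	fmt.Println(u.Path, u.RawQuery, requestURI) // /search q=linuxkit /search?q=linuxkit

	u, requestURI, _ = buildRequestTarget("CONNECT", "example.com:443", "")
	fmt.Println(u.Host, requestURI) // example.com:443 example.com:443
}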
promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) - rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ - method: msg.method, - scheme: msg.url.Scheme, - authority: msg.url.Host, - path: msg.url.RequestURI(), - header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{ + Method: msg.method, + Scheme: msg.url.Scheme, + Authority: msg.url.Host, + Path: msg.url.RequestURI(), + Header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE }) if err != nil { // Should not happen, since we've already validated msg.url. diff --git a/src/cmd/linuxkit/vendor/golang.org/x/net/http2/transport.go b/src/cmd/linuxkit/vendor/golang.org/x/net/http2/transport.go index 090d0e1bd..f26356b9c 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/net/http2/transport.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "sort" "strconv" "strings" "sync" @@ -35,6 +34,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" "golang.org/x/net/idna" + "golang.org/x/net/internal/httpcommon" ) const ( @@ -375,6 +375,7 @@ type ClientConn struct { doNotReuse bool // whether conn is marked to not be reused for any future requests closing bool closed bool + closedOnIdle bool // true if conn was closed for idleness seenSettings bool // true if we've seen a settings frame, false otherwise seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back @@ -1089,10 +1090,12 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { // If this connection has never been used for a request and is closed, // then let it take a request (which will fail). + // If the conn was closed for idleness, we're racing the idle timer; + // don't try to use the conn. (Issue #70515.) // // This avoids a situation where an error early in a connection's lifetime // goes unreported. - if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed { + if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle { st.canTakeNewRequest = true } @@ -1155,6 +1158,7 @@ func (cc *ClientConn) closeIfIdle() { return } cc.closed = true + cc.closedOnIdle = true nextID := cc.nextStreamID // TODO: do clients send GOAWAY too? maybe? Just Close: cc.mu.Unlock() @@ -1271,23 +1275,6 @@ func (cc *ClientConn) closeForLostPing() { // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. var errRequestCanceled = errors.New("net/http: request canceled") -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { - k = canonicalHeader(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return "", fmt.Errorf("invalid Trailer key %q", k) - } - keys = append(keys, k) - } - if len(keys) > 0 { - sort.Strings(keys) - return strings.Join(keys, ","), nil - } - return "", nil -} - func (cc *ClientConn) responseHeaderTimeout() time.Duration { if cc.t.t1 != nil { return cc.t.t1.ResponseHeaderTimeout @@ -1299,22 +1286,6 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration { return 0 } -// checkConnHeaders checks whether req has any invalid connection-level headers. 
-// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. -// Certain headers are special-cased as okay but not transmitted later. -func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) - } - if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { - return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) - } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { - return fmt.Errorf("http2: invalid Connection request header: %q", vv) - } - return nil -} - // actualContentLength returns a sanitized version of // req.ContentLength, where 0 actually means zero (not unknown) and -1 // means unknown. @@ -1360,25 +1331,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) donec: make(chan struct{}), } - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. See https://golang.org/issue/8923 - cs.requestedGzip = true - } + cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression()) go cs.doRequest(req, streamf) @@ -1492,10 +1445,6 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre cc := cs.cc ctx := cs.ctx - if err := checkConnHeaders(req); err != nil { - return err - } - // wait for setting frames to be received, a server can change this value later, // but we just wait for the first settings frame var isExtendedConnect bool @@ -1659,26 +1608,39 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is // sent by writeRequestBody below, along with any Trailers, // again in form HEADERS{1}, CONTINUATION{0,}) - trailers, err := commaSeparatedTrailers(req) + cc.hbuf.Reset() + res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) { + cc.writeHeader(name, value) + }) if err != nil { - return err - } - hasTrailers := trailers != "" - contentLen := actualContentLength(req) - hasBody := contentLen != 0 - hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen) - if err != nil { - return err + return fmt.Errorf("http2: %w", err) } + hdrs := cc.hbuf.Bytes() // Write the request. 
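encodeAndWriteHeaders now resets cc.hbuf and lets httpcommon.EncodeHeaders drive a callback that feeds each validated (name, value) pair to the connection's HPACK encoder. A self-contained illustration of that encoding step using the public golang.org/x/net/http2/hpack package; the header values are invented:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)

	// The transport's writeHeader callback does essentially this for every
	// pair the header encoder emits.
	for _, hf := range []hpack.HeaderField{
		{Name: ":method", Value: "GET"},
		{Name: ":scheme", Value: "https"},
		{Name: ":authority", Value: "example.com"},
		{Name: ":path", Value: "/"},
		{Name: "user-agent", Value: "example/1.0"},
	} {
		if err := enc.WriteField(hf); err != nil {
			panic(err)
		}
	}

	fmt.Printf("HPACK block: %d bytes\n", buf.Len())
}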
- endStream := !hasBody && !hasTrailers + endStream := !res.HasBody && !res.HasTrailers cs.sentHeaders = true err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) traceWroteHeaders(cs.trace) return err } +func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) { + return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{ + Request: httpcommon.Request{ + Header: req.Header, + Trailer: req.Trailer, + URL: req.URL, + Host: req.Host, + Method: req.Method, + ActualContentLength: actualContentLength(req), + }, + AddGzipHeader: addGzipHeader, + PeerMaxHeaderListSize: peerMaxHeaderListSize, + DefaultUserAgent: defaultUserAgent, + }, headerf) +} + // cleanupWriteRequest performs post-request tasks. // // If err (the result of writeRequest) is non-nil and the stream is not closed, @@ -2066,218 +2028,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) } } -func validateHeaders(hdrs http.Header) string { - for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { - return fmt.Sprintf("name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, - // because it may be sensitive. - return fmt.Sprintf("value for header %q", k) - } - } - } - return "" -} - -var errNilRequestURL = errors.New("http2: Request.URI is nil") - -func isNormalConnect(req *http.Request) bool { - return req.Method == "CONNECT" && req.Header.Get(":protocol") == "" -} - -// requires cc.wmu be held. -func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { - cc.hbuf.Reset() - if req.URL == nil { - return nil, errNilRequestURL - } - - host := req.Host - if host == "" { - host = req.URL.Host - } - host, err := httpguts.PunycodeHostPort(host) - if err != nil { - return nil, err - } - if !httpguts.ValidHostHeader(host) { - return nil, errors.New("http2: invalid Host header") - } - - var path string - if !isNormalConnect(req) { - path = req.URL.RequestURI() - if !validPseudoPath(path) { - orig := path - path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) - if !validPseudoPath(path) { - if req.URL.Opaque != "" { - return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) - } else { - return nil, fmt.Errorf("invalid request :path %q", orig) - } - } - } - } - - // Check for any invalid headers+trailers and return an error before we - // potentially pollute our hpack state. (We want to be able to - // continue to reuse the hpack encoder for future requests) - if err := validateHeaders(req.Header); err != "" { - return nil, fmt.Errorf("invalid HTTP header %s", err) - } - if err := validateHeaders(req.Trailer); err != "" { - return nil, fmt.Errorf("invalid HTTP trailer %s", err) - } - - enumerateHeaders := func(f func(name, value string)) { - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production, see Sections 3.3 and 3.4 of - // [RFC3986]). 
- f(":authority", host) - m := req.Method - if m == "" { - m = http.MethodGet - } - f(":method", m) - if !isNormalConnect(req) { - f(":path", path) - f(":scheme", req.URL.Scheme) - } - if trailers != "" { - f("trailer", trailers) - } - - var didUA bool - for k, vv := range req.Header { - if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - } else if asciiEqualFold(k, "connection") || - asciiEqualFold(k, "proxy-connection") || - asciiEqualFold(k, "transfer-encoding") || - asciiEqualFold(k, "upgrade") || - asciiEqualFold(k, "keep-alive") { - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - } else if asciiEqualFold(k, "user-agent") { - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). - didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } - } else if asciiEqualFold(k, "cookie") { - // Per 8.1.2.5 To allow for better compression efficiency, the - // Cookie header field MAY be split into separate header fields, - // each with one or more cookie-pairs. - for _, v := range vv { - for { - p := strings.IndexByte(v, ';') - if p < 0 { - break - } - f("cookie", v[:p]) - p++ - // strip space after semicolon if any. - for p+1 <= len(v) && v[p] == ' ' { - p++ - } - v = v[p:] - } - if len(v) > 0 { - f("cookie", v) - } - } - continue - } - - for _, v := range vv { - f(k, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - f("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - f("accept-encoding", "gzip") - } - if !didUA { - f("user-agent", defaultUserAgent) - } - } - - // Do a first pass over the headers counting bytes to ensure - // we don't exceed cc.peerMaxHeaderListSize. This is done as a - // separate pass before encoding the headers to prevent - // modifying the hpack state. - hlSize := uint64(0) - enumerateHeaders(func(name, value string) { - hf := hpack.HeaderField{Name: name, Value: value} - hlSize += uint64(hf.Size()) - }) - - if hlSize > cc.peerMaxHeaderListSize { - return nil, errRequestHeaderListSize - } - - trace := httptrace.ContextClientTrace(req.Context()) - traceHeaders := traceHasWroteHeaderField(trace) - - // Header list size is ok. Write the headers. - enumerateHeaders(func(name, value string) { - name, ascii := lowerHeader(name) - if !ascii { - // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header - // field names have to be ASCII characters (just as in HTTP/1.x). - return - } - cc.writeHeader(name, value) - if traceHeaders { - traceWroteHeaderField(trace, name, value) - } - }) - - return cc.hbuf.Bytes(), nil -} - -// shouldSendReqContentLength reports whether the http2.Transport should send -// a "content-length" request header. This logic is basically a copy of the net/http -// transferWriter.shouldSendContentLength. -// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). -// -1 means unknown. -func shouldSendReqContentLength(method string, contentLength int64) bool { - if contentLength > 0 { - return true - } - if contentLength < 0 { - return false - } - // For zero bodies, whether we send a content-length depends on the method. 
- // It also kinda doesn't matter for http2 either way, with END_STREAM. - switch method { - case "POST", "PUT", "PATCH": - return true - default: - return false - } -} - // requires cc.wmu be held. func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { cc.hbuf.Reset() @@ -2294,7 +2044,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { } for k, vv := range trailer { - lowKey, ascii := lowerHeader(k) + lowKey, ascii := httpcommon.LowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). @@ -2434,9 +2184,12 @@ func (rl *clientConnReadLoop) cleanup() { // This avoids a situation where new connections are constantly created, // added to the pool, fail, and are removed from the pool, without any error // being surfaced to the user. - const unusedWaitTime = 5 * time.Second + unusedWaitTime := 5 * time.Second + if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { + unusedWaitTime = cc.idleTimeout + } idleTime := cc.t.now().Sub(cc.lastActive) - if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime { + if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { cc.t.connPool().MarkDead(cc) }) @@ -2457,6 +2210,13 @@ func (rl *clientConnReadLoop) cleanup() { } cc.cond.Broadcast() cc.mu.Unlock() + + if !cc.seenSettings { + // If we have a pending request that wants extended CONNECT, + // let it continue and fail with the connection error. + cc.extendedConnectAllowed = true + close(cc.seenSettingsChan) + } } // countReadFrameError calls Transport.CountError with a string @@ -2549,9 +2309,6 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) } - if !cc.seenSettings { - close(cc.seenSettingsChan) - } return err } } @@ -2646,7 +2403,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra Status: status + " " + http.StatusText(statusCode), } for _, hf := range regularFields { - key := canonicalHeader(hf.Name) + key := httpcommon.CanonicalHeader(hf.Name) if key == "Trailer" { t := res.Trailer if t == nil { @@ -2654,7 +2411,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra res.Trailer = t } foreachHeaderElement(hf.Value, func(v string) { - t[canonicalHeader(v)] = nil + t[httpcommon.CanonicalHeader(v)] = nil }) } else { vv := header[key] @@ -2778,7 +2535,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr trailer := make(http.Header) for _, hf := range f.RegularFields() { - key := canonicalHeader(hf.Name) + key := httpcommon.CanonicalHeader(hf.Name) trailer[key] = append(trailer[key], hf.Value) } cs.trailer = trailer @@ -3324,7 +3081,7 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, var ( errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") - errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") + errRequestHeaderListSize = httpcommon.ErrRequestHeaderListSize ) func (cc *ClientConn) logf(format string, args ...interface{}) { @@ -3508,16 +3265,6 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { } } -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return 
trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { if trace != nil { return trace.Got1xxResponse diff --git a/src/cmd/linuxkit/vendor/golang.org/x/net/http2/write.go b/src/cmd/linuxkit/vendor/golang.org/x/net/http2/write.go index 6ff6bee7e..fdb35b947 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/net/http2/write.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/net/http2/write.go @@ -13,6 +13,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" + "golang.org/x/net/internal/httpcommon" ) // writeFramer is implemented by any type that is used to write frames. @@ -351,7 +352,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { } for _, k := range keys { vv := h[k] - k, ascii := lowerHeader(k) + k, ascii := httpcommon.LowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). diff --git a/src/cmd/linuxkit/vendor/golang.org/x/net/internal/httpcommon/ascii.go b/src/cmd/linuxkit/vendor/golang.org/x/net/internal/httpcommon/ascii.go new file mode 100644 index 000000000..ed14da5af --- /dev/null +++ b/src/cmd/linuxkit/vendor/golang.org/x/net/internal/httpcommon/ascii.go @@ -0,0 +1,53 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httpcommon + +import "strings" + +// The HTTP protocols are defined in terms of ASCII, not Unicode. This file +// contains helper functions which may use Unicode-aware functions which would +// otherwise be unsafe and could introduce vulnerabilities if used improperly. + +// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t +// are equal, ASCII-case-insensitively. +func asciiEqualFold(s, t string) bool { + if len(s) != len(t) { + return false + } + for i := 0; i < len(s); i++ { + if lower(s[i]) != lower(t[i]) { + return false + } + } + return true +} + +// lower returns the ASCII lowercase version of b. +func lower(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// isASCIIPrint returns whether s is ASCII and printable according to +// https://tools.ietf.org/html/rfc20#section-4.2. +func isASCIIPrint(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] < ' ' || s[i] > '~' { + return false + } + } + return true +} + +// asciiToLower returns the lowercase version of s if s is ASCII and printable, +// and whether or not it was. +func asciiToLower(s string) (lower string, ok bool) { + if !isASCIIPrint(s) { + return "", false + } + return strings.ToLower(s), true +} diff --git a/src/cmd/linuxkit/vendor/golang.org/x/net/http2/headermap.go b/src/cmd/linuxkit/vendor/golang.org/x/net/internal/httpcommon/headermap.go similarity index 74% rename from src/cmd/linuxkit/vendor/golang.org/x/net/http2/headermap.go rename to src/cmd/linuxkit/vendor/golang.org/x/net/internal/httpcommon/headermap.go index 149b3dd20..92483d8e4 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/net/http2/headermap.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/net/internal/httpcommon/headermap.go @@ -1,11 +1,11 @@ -// Copyright 2014 The Go Authors. All rights reserved. 
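The new internal/httpcommon/ascii.go above deliberately keeps header-name comparison ASCII-only, because Unicode-aware case folding can equate characters that must remain distinct in protocol elements. A small sketch contrasting the two; asciiEqualFold and lower are copied from the hunk above purely for illustration:

package main

import (
	"fmt"
	"strings"
)

func asciiEqualFold(s, t string) bool {
	if len(s) != len(t) {
		return false
	}
	for i := 0; i < len(s); i++ {
		if lower(s[i]) != lower(t[i]) {
			return false
		}
	}
	return true
}

func lower(b byte) byte {
	if 'A' <= b && b <= 'Z' {
		return b + ('a' - 'A')
	}
	return b
}

func main() {
	// strings.EqualFold is Unicode-aware: the Kelvin sign U+212A folds to "k".
	fmt.Println(strings.EqualFold("\u212a", "k")) // true
	// The ASCII-only helper does not, which is what header matching wants.
	fmt.Println(asciiEqualFold("\u212a", "k"))              // false
	fmt.Println(asciiEqualFold("Connection", "connection")) // true
}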
+// Copyright 2025 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package http2 +package httpcommon import ( - "net/http" + "net/textproto" "sync" ) @@ -82,13 +82,15 @@ func buildCommonHeaderMaps() { commonLowerHeader = make(map[string]string, len(common)) commonCanonHeader = make(map[string]string, len(common)) for _, v := range common { - chk := http.CanonicalHeaderKey(v) + chk := textproto.CanonicalMIMEHeaderKey(v) commonLowerHeader[chk] = v commonCanonHeader[v] = chk } } -func lowerHeader(v string) (lower string, ascii bool) { +// LowerHeader returns the lowercase form of a header name, +// used on the wire for HTTP/2 and HTTP/3 requests. +func LowerHeader(v string) (lower string, ascii bool) { buildCommonHeaderMapsOnce() if s, ok := commonLowerHeader[v]; ok { return s, true @@ -96,10 +98,18 @@ func lowerHeader(v string) (lower string, ascii bool) { return asciiToLower(v) } -func canonicalHeader(v string) string { +// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".) +func CanonicalHeader(v string) string { buildCommonHeaderMapsOnce() if s, ok := commonCanonHeader[v]; ok { return s } - return http.CanonicalHeaderKey(v) + return textproto.CanonicalMIMEHeaderKey(v) +} + +// CachedCanonicalHeader returns the canonical form of a well-known header name. +func CachedCanonicalHeader(v string) (string, bool) { + buildCommonHeaderMapsOnce() + s, ok := commonCanonHeader[v] + return s, ok } diff --git a/src/cmd/linuxkit/vendor/golang.org/x/net/internal/httpcommon/request.go b/src/cmd/linuxkit/vendor/golang.org/x/net/internal/httpcommon/request.go new file mode 100644 index 000000000..4b7055317 --- /dev/null +++ b/src/cmd/linuxkit/vendor/golang.org/x/net/internal/httpcommon/request.go @@ -0,0 +1,467 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httpcommon + +import ( + "context" + "errors" + "fmt" + "net/http/httptrace" + "net/textproto" + "net/url" + "sort" + "strconv" + "strings" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +var ( + ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit") +) + +// Request is a subset of http.Request. +// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http +// without creating a dependency cycle. +type Request struct { + URL *url.URL + Method string + Host string + Header map[string][]string + Trailer map[string][]string + ActualContentLength int64 // 0 means 0, -1 means unknown +} + +// EncodeHeadersParam is parameters to EncodeHeaders. +type EncodeHeadersParam struct { + Request Request + + // AddGzipHeader indicates that an "accept-encoding: gzip" header should be + // added to the request. + AddGzipHeader bool + + // PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting. + PeerMaxHeaderListSize uint64 + + // DefaultUserAgent is the User-Agent header to send when the request + // neither contains a User-Agent nor disables it. + DefaultUserAgent string +} + +// EncodeHeadersParam is the result of EncodeHeaders. +type EncodeHeadersResult struct { + HasBody bool + HasTrailers bool +} + +// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3. +// It validates a request and calls headerf with each pseudo-header and header +// for the request. 
+// The headerf function is called with the validated, canonicalized header name. +func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) { + req := param.Request + + // Check for invalid connection-level headers. + if err := checkConnHeaders(req.Header); err != nil { + return res, err + } + + if req.URL == nil { + return res, errors.New("Request.URL is nil") + } + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httpguts.PunycodeHostPort(host) + if err != nil { + return res, err + } + if !httpguts.ValidHostHeader(host) { + return res, errors.New("invalid Host header") + } + + // isNormalConnect is true if this is a non-extended CONNECT request. + isNormalConnect := false + var protocol string + if vv := req.Header[":protocol"]; len(vv) > 0 { + protocol = vv[0] + } + if req.Method == "CONNECT" && protocol == "" { + isNormalConnect = true + } else if protocol != "" && req.Method != "CONNECT" { + return res, errors.New("invalid :protocol header in non-CONNECT request") + } + + // Validate the path, except for non-extended CONNECT requests which have no path. + var path string + if !isNormalConnect { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return res, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers+trailers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + if err := validateHeaders(req.Header); err != "" { + return res, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return res, fmt.Errorf("invalid HTTP trailer %s", err) + } + + trailers, err := commaSeparatedTrailers(req.Trailer) + if err != nil { + return res, err + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production, see Sections 3.3 and 3.4 of + // [RFC3986]). + f(":authority", host) + m := req.Method + if m == "" { + m = "GET" + } + f(":method", m) + if !isNormalConnect { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if protocol != "" { + f(":protocol", protocol) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if asciiEqualFold(k, "connection") || + asciiEqualFold(k, "proxy-connection") || + asciiEqualFold(k, "transfer-encoding") || + asciiEqualFold(k, "upgrade") || + asciiEqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if asciiEqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. 
Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + } else if asciiEqualFold(k, "cookie") { + // Per 8.1.2.5 To allow for better compression efficiency, the + // Cookie header field MAY be split into separate header fields, + // each with one or more cookie-pairs. + for _, v := range vv { + for { + p := strings.IndexByte(v, ';') + if p < 0 { + break + } + f("cookie", v[:p]) + p++ + // strip space after semicolon if any. + for p+1 <= len(v) && v[p] == ' ' { + p++ + } + v = v[p:] + } + if len(v) > 0 { + f("cookie", v) + } + } + continue + } else if k == ":protocol" { + // :protocol pseudo-header was already sent above. + continue + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, req.ActualContentLength) { + f("content-length", strconv.FormatInt(req.ActualContentLength, 10)) + } + if param.AddGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", param.DefaultUserAgent) + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + if param.PeerMaxHeaderListSize > 0 { + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > param.PeerMaxHeaderListSize { + return res, ErrRequestHeaderListSize + } + } + + trace := httptrace.ContextClientTrace(ctx) + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + name, ascii := LowerHeader(name) + if !ascii { + // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header + // field names have to be ASCII characters (just as in HTTP/1.x). + return + } + + headerf(name, value) + + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(name, []string{value}) + } + }) + + res.HasBody = req.ActualContentLength != 0 + res.HasTrailers = trailers != "" + return res, nil +} + +// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header +// for a request. +func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool { + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !disableCompression && + len(header["Accept-Encoding"]) == 0 && + len(header["Range"]) == 0 && + method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + return true + } + return false +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// +// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3 +// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1 +// +// Certain headers are special-cased as okay but not transmitted later. +// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding. 
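The Cookie handling above splits one Cookie header value into a separate field per cookie-pair so HPACK can index each pair individually (RFC 7540, Section 8.1.2.5). A standalone sketch of that loop; splitCookiePairs is an invented name for this illustration:

package main

import (
	"fmt"
	"strings"
)

func splitCookiePairs(v string, emit func(string)) {
	for {
		p := strings.IndexByte(v, ';')
		if p < 0 {
			break
		}
		emit(v[:p])
		p++
		// Strip a space after the semicolon, if any.
		for p+1 <= len(v) && v[p] == ' ' {
			p++
		}
		v = v[p:]
	}
	if len(v) > 0 {
		emit(v)
	}
}

func main() {
	splitCookiePairs("a=1; b=2; c=3", func(pair string) {
		fmt.Printf("cookie: %q\n", pair)
	})
	// Prints:
	// cookie: "a=1"
	// cookie: "b=2"
	// cookie: "c=3"
}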
+func checkConnHeaders(h map[string][]string) error { + if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Upgrade request header: %q", vv) + } + if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv) + } + if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { + return fmt.Errorf("invalid Connection request header: %q", vv) + } + return nil +} + +func commaSeparatedTrailers(trailer map[string][]string) (string, error) { + keys := make([]string, 0, len(trailer)) + for k := range trailer { + k = CanonicalHeader(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", fmt.Errorf("invalid Trailer key %q", k) + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + return strings.Join(keys, ","), nil + } + return "", nil +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// - a non-empty string starting with '/' +// - the string '*', for OPTIONS requests. +// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. +func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} + +func validateHeaders(hdrs map[string][]string) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } + } + return "" +} + +// shouldSendReqContentLength reports whether we should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// ServerRequestParam is parameters to NewServerRequest. +type ServerRequestParam struct { + Method string + Scheme, Authority, Path string + Protocol string + Header map[string][]string +} + +// ServerRequestResult is the result of NewServerRequest. +type ServerRequestResult struct { + // Various http.Request fields. + URL *url.URL + RequestURI string + Trailer map[string][]string + + NeedsContinue bool // client provided an "Expect: 100-continue" header + + // If the request should be rejected, this is a short string suitable for passing + // to the http2 package's CountError function. 
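shouldSendReqContentLength above encodes a small decision table: a positive length is always sent, an unknown length (-1) never is, and a zero-length body gets a content-length only for methods that conventionally carry a body. A sketch of the same rule for quick reference; the helper name is ours, not the package's:

package main

import "fmt"

func shouldSendContentLength(method string, contentLength int64) bool {
	if contentLength > 0 {
		return true
	}
	if contentLength < 0 {
		return false
	}
	switch method {
	case "POST", "PUT", "PATCH":
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(shouldSendContentLength("GET", 0))   // false
	fmt.Println(shouldSendContentLength("POST", 0))  // true
	fmt.Println(shouldSendContentLength("GET", 128)) // true
	fmt.Println(shouldSendContentLength("PUT", -1))  // false
}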
+ // It might be a bit odd to return errors this way rather than returing an error, + // but this ensures we don't forget to include a CountError reason. + InvalidReason string +} + +func NewServerRequest(rp ServerRequestParam) ServerRequestResult { + needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue") + if needsContinue { + delete(rp.Header, "Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.Header["Cookie"]; len(cookies) > 1 { + rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")} + } + + // Setup Trailers + var trailer map[string][]string + for _, v := range rp.Header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(map[string][]string) + } + trailer[key] = nil + } + } + } + delete(rp.Header, "Trailer") + + // "':authority' MUST NOT include the deprecated userinfo subcomponent + // for "http" or "https" schemed URIs." + // https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8 + if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") { + return ServerRequestResult{ + InvalidReason: "userinfo_in_authority", + } + } + + var url_ *url.URL + var requestURI string + if rp.Method == "CONNECT" && rp.Protocol == "" { + url_ = &url.URL{Host: rp.Authority} + requestURI = rp.Authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.Path) + if err != nil { + return ServerRequestResult{ + InvalidReason: "bad_path", + } + } + requestURI = rp.Path + } + + return ServerRequestResult{ + URL: url_, + NeedsContinue: needsContinue, + RequestURI: requestURI, + Trailer: trailer, + } +} diff --git a/src/cmd/linuxkit/vendor/golang.org/x/oauth2/google/default.go b/src/cmd/linuxkit/vendor/golang.org/x/oauth2/google/default.go index df958359a..0260935ba 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/oauth2/google/default.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/oauth2/google/default.go @@ -251,6 +251,12 @@ func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials // a Google Developers service account key file, a gcloud user credentials file (a.k.a. refresh // token JSON), or the JSON configuration file for workload identity federation in non-Google cloud // platforms (see https://cloud.google.com/iam/docs/how-to#using-workload-identity-federation). +// +// Important: If you accept a credential configuration (credential JSON/File/Stream) from an +// external source for authentication to Google Cloud Platform, you must validate it before +// providing it to any Google API or library. Providing an unvalidated credential configuration to +// Google APIs can compromise the security of your systems and data. For more information, refer to +// [Validate credential configurations from external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params CredentialsParams) (*Credentials, error) { // Make defensive copy of the slices in params. params = params.deepCopy() @@ -294,6 +300,12 @@ func CredentialsFromJSONWithParams(ctx context.Context, jsonData []byte, params } // CredentialsFromJSON invokes CredentialsFromJSONWithParams with the specified scopes. 
+// +// Important: If you accept a credential configuration (credential JSON/File/Stream) from an +// external source for authentication to Google Cloud Platform, you must validate it before +// providing it to any Google API or library. Providing an unvalidated credential configuration to +// Google APIs can compromise the security of your systems and data. For more information, refer to +// [Validate credential configurations from external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { var params CredentialsParams params.Scopes = scopes diff --git a/src/cmd/linuxkit/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go b/src/cmd/linuxkit/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go index ee34924e3..fc106347d 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/oauth2/google/externalaccount/basecredentials.go @@ -278,20 +278,52 @@ type Format struct { type CredentialSource struct { // File is the location for file sourced credentials. // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + // + // Important: If you accept a credential configuration (credential + // JSON/File/Stream) from an external source for authentication to Google + // Cloud Platform, you must validate it before providing it to any Google + // API or library. Providing an unvalidated credential configuration to + // Google APIs can compromise the security of your systems and data. For + // more information, refer to [Validate credential configurations from + // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). File string `json:"file"` // Url is the URL to call for URL sourced credentials. // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + // + // Important: If you accept a credential configuration (credential + // JSON/File/Stream) from an external source for authentication to Google + // Cloud Platform, you must validate it before providing it to any Google + // API or library. Providing an unvalidated credential configuration to + // Google APIs can compromise the security of your systems and data. For + // more information, refer to [Validate credential configurations from + // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). URL string `json:"url"` // Headers are the headers to attach to the request for URL sourced credentials. Headers map[string]string `json:"headers"` // Executable is the configuration object for executable sourced credentials. // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + // + // Important: If you accept a credential configuration (credential + // JSON/File/Stream) from an external source for authentication to Google + // Cloud Platform, you must validate it before providing it to any Google + // API or library. Providing an unvalidated credential configuration to + // Google APIs can compromise the security of your systems and data. 
For + // more information, refer to [Validate credential configurations from + // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). Executable *ExecutableConfig `json:"executable"` // EnvironmentID is the EnvironmentID used for AWS sourced credentials. This should start with "AWS". // One field amongst File, URL, Executable, or EnvironmentID should be provided, depending on the kind of credential in question. + // + // Important: If you accept a credential configuration (credential + // JSON/File/Stream) from an external source for authentication to Google + // Cloud Platform, you must validate it before providing it to any Google + // API or library. Providing an unvalidated credential configuration to + // Google APIs can compromise the security of your systems and data. For + // more information, refer to [Validate credential configurations from + // external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials). EnvironmentID string `json:"environment_id"` // RegionURL is the metadata URL to retrieve the region from for EC2 AWS credentials. RegionURL string `json:"region_url"` diff --git a/src/cmd/linuxkit/vendor/golang.org/x/oauth2/jws/jws.go b/src/cmd/linuxkit/vendor/golang.org/x/oauth2/jws/jws.go index 95015648b..6f03a49d3 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/oauth2/jws/jws.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/oauth2/jws/jws.go @@ -165,11 +165,11 @@ func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) { // Verify tests whether the provided JWT token's signature was produced by the private key // associated with the supplied public key. func Verify(token string, key *rsa.PublicKey) error { - parts := strings.Split(token, ".") - if len(parts) != 3 { + if strings.Count(token, ".") != 2 { return errors.New("jws: invalid token received, token must have 3 parts") } + parts := strings.SplitN(token, ".", 3) signedContent := parts[0] + "." + parts[1] signatureString, err := base64.RawURLEncoding.DecodeString(parts[2]) if err != nil { diff --git a/src/cmd/linuxkit/vendor/golang.org/x/oauth2/pkce.go b/src/cmd/linuxkit/vendor/golang.org/x/oauth2/pkce.go index 50593b6df..6a95da975 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/oauth2/pkce.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/oauth2/pkce.go @@ -21,7 +21,7 @@ const ( // // A fresh verifier should be generated for each authorization. // S256ChallengeOption(verifier) should then be passed to Config.AuthCodeURL -// (or Config.DeviceAccess) and VerifierOption(verifier) to Config.Exchange +// (or Config.DeviceAuth) and VerifierOption(verifier) to Config.Exchange // (or Config.DeviceAccessToken). func GenerateVerifier() string { // "RECOMMENDED that the output of a suitable random number generator be @@ -51,7 +51,7 @@ func S256ChallengeFromVerifier(verifier string) string { } // S256ChallengeOption derives a PKCE code challenge derived from verifier with -// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAccess +// method S256. It should be passed to Config.AuthCodeURL or Config.DeviceAuth // only. 
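For reference, a minimal sketch of the PKCE flow that the corrected comments above describe, using the vendored golang.org/x/oauth2 package; the endpoint URLs, client ID, redirect URL, and authorization code below are placeholders, not values from this repository:

package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:    "client-id",           // placeholder
		RedirectURL: "http://127.0.0.1/cb", // placeholder
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://auth.example/authorize", // placeholder
			TokenURL: "https://auth.example/token",     // placeholder
		},
	}

	// Generate a fresh verifier per authorization, send its S256 challenge
	// with the authorization request, and the verifier itself with the exchange.
	verifier := oauth2.GenerateVerifier()
	url := conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier))
	fmt.Println("visit:", url)

	// After the user authorizes and the callback delivers "code":
	code := "authorization-code" // placeholder
	tok, err := conf.Exchange(context.Background(), code, oauth2.VerifierOption(verifier))
	if err != nil {
		fmt.Println("exchange failed:", err)
		return
	}
	fmt.Println("access token:", tok.AccessToken)
}

The same verifier string is used for both calls: as an S256 challenge in AuthCodeURL and verbatim in Exchange.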
func S256ChallengeOption(verifier string) AuthCodeOption { return challengeOption{ diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sync/errgroup/errgroup.go b/src/cmd/linuxkit/vendor/golang.org/x/sync/errgroup/errgroup.go index 948a3ee63..cfafed5b5 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -12,13 +12,15 @@ package errgroup import ( "context" "fmt" + "runtime" + "runtime/debug" "sync" ) type token struct{} // A Group is a collection of goroutines working on subtasks that are part of -// the same overall task. +// the same overall task. A Group should not be reused for different tasks. // // A zero Group is valid, has no limit on the number of active goroutines, // and does not cancel on error. @@ -31,6 +33,10 @@ type Group struct { errOnce sync.Once err error + + mu sync.Mutex + panicValue any // = PanicError | PanicValue; non-nil if some Group.Go goroutine panicked. + abnormal bool // some Group.Go goroutine terminated abnormally (panic or goexit). } func (g *Group) done() { @@ -46,36 +52,84 @@ func (g *Group) done() { // returns a non-nil error or the first time Wait returns, whichever occurs // first. func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := withCancelCause(ctx) + ctx, cancel := context.WithCancelCause(ctx) return &Group{cancel: cancel}, ctx } -// Wait blocks until all function calls from the Go method have returned, then -// returns the first non-nil error (if any) from them. +// Wait blocks until all function calls from the Go method have returned +// normally, then returns the first non-nil error (if any) from them. +// +// If any of the calls panics, Wait panics with a [PanicValue]; +// and if any of them calls [runtime.Goexit], Wait calls runtime.Goexit. func (g *Group) Wait() error { g.wg.Wait() if g.cancel != nil { g.cancel(g.err) } + if g.panicValue != nil { + panic(g.panicValue) + } + if g.abnormal { + runtime.Goexit() + } return g.err } // Go calls the given function in a new goroutine. +// The first call to Go must happen before a Wait. // It blocks until the new goroutine can be added without the number of // active goroutines in the group exceeding the configured limit. // -// The first call to return a non-nil error cancels the group's context, if the -// group was created by calling WithContext. The error will be returned by Wait. +// It blocks until the new goroutine can be added without the number of +// goroutines in the group exceeding the configured limit. +// +// The first goroutine in the group that returns a non-nil error, panics, or +// invokes [runtime.Goexit] will cancel the associated Context, if any. 
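As a usage illustration of the panic propagation that the updated errgroup documentation above describes, here is a minimal sketch assuming the vendored golang.org/x/sync/errgroup with the new PanicError and PanicValue types; the error message is a placeholder:

package main

import (
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	var g errgroup.Group
	g.Go(func() error {
		panic(errors.New("worker exploded")) // unhandled panic in a group goroutine
	})

	defer func() {
		// With this errgroup version, Wait re-panics in the waiting goroutine.
		// Because the panic value above is an error, it arrives wrapped in a
		// PanicError carrying the recovered value and the goroutine's stack.
		if r := recover(); r != nil {
			if pe, ok := r.(errgroup.PanicError); ok {
				fmt.Println("recovered:", pe.Recovered)
			}
		}
	}()
	_ = g.Wait()
}

A non-error panic value would surface as a PanicValue instead of a PanicError.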
func (g *Group) Go(f func() error) { if g.sem != nil { g.sem <- token{} } + g.add(f) +} + +func (g *Group) add(f func() error) { g.wg.Add(1) go func() { defer g.done() + normalReturn := false + defer func() { + if normalReturn { + return + } + v := recover() + g.mu.Lock() + defer g.mu.Unlock() + if !g.abnormal { + if g.cancel != nil { + g.cancel(g.err) + } + g.abnormal = true + } + if v != nil && g.panicValue == nil { + switch v := v.(type) { + case error: + g.panicValue = PanicError{ + Recovered: v, + Stack: debug.Stack(), + } + default: + g.panicValue = PanicValue{ + Recovered: v, + Stack: debug.Stack(), + } + } + } + }() - if err := f(); err != nil { + err := f() + normalReturn = true + if err != nil { g.errOnce.Do(func() { g.err = err if g.cancel != nil { @@ -100,24 +154,13 @@ func (g *Group) TryGo(f func() error) bool { } } - g.wg.Add(1) - go func() { - defer g.done() - - if err := f(); err != nil { - g.errOnce.Do(func() { - g.err = err - if g.cancel != nil { - g.cancel(g.err) - } - }) - } - }() + g.add(f) return true } // SetLimit limits the number of active goroutines in this group to at most n. // A negative value indicates no limit. +// A limit of zero will prevent any new goroutines from being added. // // Any subsequent call to the Go method will block until it can add an active // goroutine without exceeding the configured limit. @@ -133,3 +176,33 @@ func (g *Group) SetLimit(n int) { } g.sem = make(chan token, n) } + +// PanicError wraps an error recovered from an unhandled panic +// when calling a function passed to Go or TryGo. +type PanicError struct { + Recovered error + Stack []byte // result of call to [debug.Stack] +} + +func (p PanicError) Error() string { + // A Go Error method conventionally does not include a stack dump, so omit it + // here. (Callers who care can extract it from the Stack field.) + return fmt.Sprintf("recovered from errgroup.Group: %v", p.Recovered) +} + +func (p PanicError) Unwrap() error { return p.Recovered } + +// PanicValue wraps a value that does not implement the error interface, +// recovered from an unhandled panic when calling a function passed to Go or +// TryGo. +type PanicValue struct { + Recovered any + Stack []byte // result of call to [debug.Stack] +} + +func (p PanicValue) String() string { + if len(p.Stack) > 0 { + return fmt.Sprintf("recovered from errgroup.Group: %v\n%s", p.Recovered, p.Stack) + } + return fmt.Sprintf("recovered from errgroup.Group: %v", p.Recovered) +} diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sync/errgroup/go120.go b/src/cmd/linuxkit/vendor/golang.org/x/sync/errgroup/go120.go deleted file mode 100644 index f93c740b6..000000000 --- a/src/cmd/linuxkit/vendor/golang.org/x/sync/errgroup/go120.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - return context.WithCancelCause(parent) -} diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sync/errgroup/pre_go120.go b/src/cmd/linuxkit/vendor/golang.org/x/sync/errgroup/pre_go120.go deleted file mode 100644 index 88ce33434..000000000 --- a/src/cmd/linuxkit/vendor/golang.org/x/sync/errgroup/pre_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - ctx, cancel := context.WithCancel(parent) - return ctx, func(error) { cancel() } -} diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu.go index 02609d5b2..63541994e 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu.go @@ -72,6 +72,9 @@ var X86 struct { HasSSSE3 bool // Supplemental streaming SIMD extension 3 HasSSE41 bool // Streaming SIMD extension 4 and 4.1 HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add + HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions + HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions _ CacheLinePad } @@ -146,6 +149,18 @@ var ARM struct { _ CacheLinePad } +// The booleans in Loong64 contain the correspondingly named cpu feature bit. +// The struct is padded to avoid false sharing. +var Loong64 struct { + _ CacheLinePad + HasLSX bool // support 128-bit vector extension + HasLASX bool // support 256-bit vector extension + HasCRC32 bool // support CRC instruction + HasLAM_BH bool // support AM{SWAP/ADD}[_DB].{B/H} instruction + HasLAMCAS bool // support AMCAS[_DB].{B/H/W/D} instruction + _ CacheLinePad +} + // MIPS64X contains the supported CPU features of the current mips64/mips64le // platforms. If the current platform is not mips64/mips64le or the current // operating system is not Linux then all feature flags are false. @@ -217,6 +232,17 @@ var RISCV64 struct { HasZba bool // Address generation instructions extension HasZbb bool // Basic bit-manipulation extension HasZbs bool // Single-bit instructions extension + HasZvbb bool // Vector Basic Bit-manipulation + HasZvbc bool // Vector Carryless Multiplication + HasZvkb bool // Vector Cryptography Bit-manipulation + HasZvkt bool // Vector Data-Independent Execution Latency + HasZvkg bool // Vector GCM/GMAC + HasZvkn bool // NIST Algorithm Suite (AES/SHA256/SHA512) + HasZvknc bool // NIST Algorithm Suite with carryless multiply + HasZvkng bool // NIST Algorithm Suite with GCM + HasZvks bool // ShangMi Algorithm Suite + HasZvksc bool // ShangMi Algorithm Suite with carryless multiplication + HasZvksg bool // ShangMi Algorithm Suite with GCM _ CacheLinePad } diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go new file mode 100644 index 000000000..4f3411432 --- /dev/null +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go @@ -0,0 +1,22 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +// HWCAP bits. These are exposed by the Linux kernel. +const ( + hwcap_LOONGARCH_LSX = 1 << 4 + hwcap_LOONGARCH_LASX = 1 << 5 +) + +func doinit() { + // TODO: Features that require kernel support like LSX and LASX can + // be detected here once needed in std library or by the compiler. 
+ Loong64.HasLSX = hwcIsSet(hwCap, hwcap_LOONGARCH_LSX) + Loong64.HasLASX = hwcIsSet(hwCap, hwcap_LOONGARCH_LASX) +} + +func hwcIsSet(hwc uint, val uint) bool { + return hwc&val != 0 +} diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go index 7d902b684..a428dec9c 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 +//go:build linux && !arm && !arm64 && !loong64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 package cpu diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go index cb4a0c572..ad741536f 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go @@ -58,6 +58,15 @@ const ( riscv_HWPROBE_EXT_ZBA = 0x8 riscv_HWPROBE_EXT_ZBB = 0x10 riscv_HWPROBE_EXT_ZBS = 0x20 + riscv_HWPROBE_EXT_ZVBB = 0x20000 + riscv_HWPROBE_EXT_ZVBC = 0x40000 + riscv_HWPROBE_EXT_ZVKB = 0x80000 + riscv_HWPROBE_EXT_ZVKG = 0x100000 + riscv_HWPROBE_EXT_ZVKNED = 0x200000 + riscv_HWPROBE_EXT_ZVKNHB = 0x800000 + riscv_HWPROBE_EXT_ZVKSED = 0x1000000 + riscv_HWPROBE_EXT_ZVKSH = 0x2000000 + riscv_HWPROBE_EXT_ZVKT = 0x4000000 riscv_HWPROBE_KEY_CPUPERF_0 = 0x5 riscv_HWPROBE_MISALIGNED_FAST = 0x3 riscv_HWPROBE_MISALIGNED_MASK = 0x7 @@ -99,6 +108,20 @@ func doinit() { RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA) RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB) RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS) + RISCV64.HasZvbb = isSet(v, riscv_HWPROBE_EXT_ZVBB) + RISCV64.HasZvbc = isSet(v, riscv_HWPROBE_EXT_ZVBC) + RISCV64.HasZvkb = isSet(v, riscv_HWPROBE_EXT_ZVKB) + RISCV64.HasZvkg = isSet(v, riscv_HWPROBE_EXT_ZVKG) + RISCV64.HasZvkt = isSet(v, riscv_HWPROBE_EXT_ZVKT) + // Cryptography shorthand extensions + RISCV64.HasZvkn = isSet(v, riscv_HWPROBE_EXT_ZVKNED) && + isSet(v, riscv_HWPROBE_EXT_ZVKNHB) && RISCV64.HasZvkb && RISCV64.HasZvkt + RISCV64.HasZvknc = RISCV64.HasZvkn && RISCV64.HasZvbc + RISCV64.HasZvkng = RISCV64.HasZvkn && RISCV64.HasZvkg + RISCV64.HasZvks = isSet(v, riscv_HWPROBE_EXT_ZVKSED) && + isSet(v, riscv_HWPROBE_EXT_ZVKSH) && RISCV64.HasZvkb && RISCV64.HasZvkt + RISCV64.HasZvksc = RISCV64.HasZvks && RISCV64.HasZvbc + RISCV64.HasZvksg = RISCV64.HasZvks && RISCV64.HasZvkg } if pairs[1].key != -1 { v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_loong64.go index 558635850..45ecb29ae 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_loong64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_loong64.go @@ -8,5 +8,43 @@ package cpu const cacheLineSize = 64 +// Bit fields for CPUCFG registers, Related reference documents: +// https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_cpucfg +const ( + // CPUCFG1 bits + cpucfg1_CRC32 = 1 << 25 + + // CPUCFG2 bits + cpucfg2_LAM_BH = 1 << 27 + cpucfg2_LAMCAS = 1 << 28 +) + func initOptions() { + options = []option{ + {Name: "lsx", Feature: &Loong64.HasLSX}, + {Name: "lasx", Feature: 
&Loong64.HasLASX}, + {Name: "crc32", Feature: &Loong64.HasCRC32}, + {Name: "lam_bh", Feature: &Loong64.HasLAM_BH}, + {Name: "lamcas", Feature: &Loong64.HasLAMCAS}, + } + + // The CPUCFG data on Loong64 only reflects the hardware capabilities, + // not the kernel support status, so features such as LSX and LASX that + // require kernel support cannot be obtained from the CPUCFG data. + // + // These features only require hardware capability support and do not + // require kernel specific support, so they can be obtained directly + // through CPUCFG + cfg1 := get_cpucfg(1) + cfg2 := get_cpucfg(2) + + Loong64.HasCRC32 = cfgIsSet(cfg1, cpucfg1_CRC32) + Loong64.HasLAMCAS = cfgIsSet(cfg2, cpucfg2_LAMCAS) + Loong64.HasLAM_BH = cfgIsSet(cfg2, cpucfg2_LAM_BH) +} + +func get_cpucfg(reg uint32) uint32 + +func cfgIsSet(cfg uint32, val uint32) bool { + return cfg&val != 0 } diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_loong64.s b/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_loong64.s new file mode 100644 index 000000000..71cbaf1ce --- /dev/null +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_loong64.s @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// func get_cpucfg(reg uint32) uint32 +TEXT ·get_cpucfg(SB), NOSPLIT|NOFRAME, $0 + MOVW reg+0(FP), R5 + // CPUCFG R5, R4 = 0x00006ca4 + WORD $0x00006ca4 + MOVW R4, ret+8(FP) + RET diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_riscv64.go index aca3199c9..0f617aef5 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -16,5 +16,17 @@ func initOptions() { {Name: "zba", Feature: &RISCV64.HasZba}, {Name: "zbb", Feature: &RISCV64.HasZbb}, {Name: "zbs", Feature: &RISCV64.HasZbs}, + // RISC-V Cryptography Extensions + {Name: "zvbb", Feature: &RISCV64.HasZvbb}, + {Name: "zvbc", Feature: &RISCV64.HasZvbc}, + {Name: "zvkb", Feature: &RISCV64.HasZvkb}, + {Name: "zvkg", Feature: &RISCV64.HasZvkg}, + {Name: "zvkt", Feature: &RISCV64.HasZvkt}, + {Name: "zvkn", Feature: &RISCV64.HasZvkn}, + {Name: "zvknc", Feature: &RISCV64.HasZvknc}, + {Name: "zvkng", Feature: &RISCV64.HasZvkng}, + {Name: "zvks", Feature: &RISCV64.HasZvks}, + {Name: "zvksc", Feature: &RISCV64.HasZvksc}, + {Name: "zvksg", Feature: &RISCV64.HasZvksg}, } } diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_x86.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_x86.go index 600a68078..1e642f330 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -53,6 +53,9 @@ func initOptions() { {Name: "sse41", Feature: &X86.HasSSE41}, {Name: "sse42", Feature: &X86.HasSSE42}, {Name: "ssse3", Feature: &X86.HasSSSE3}, + {Name: "avxifma", Feature: &X86.HasAVXIFMA}, + {Name: "avxvnni", Feature: &X86.HasAVXVNNI}, + {Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8}, // These capabilities should always be enabled on amd64: {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, @@ -106,7 +109,7 @@ func archInit() { return } - _, ebx7, ecx7, edx7 := cpuid(7, 0) + eax7, ebx7, ecx7, edx7 := cpuid(7, 0) X86.HasBMI1 = isSet(3, ebx7) X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX X86.HasBMI2 = isSet(8, ebx7) @@ -134,14 +137,24 @@ func archInit() { X86.HasAVX512VAES = isSet(9, 
ecx7) X86.HasAVX512VBMI2 = isSet(6, ecx7) X86.HasAVX512BITALG = isSet(12, ecx7) - - eax71, _, _, _ := cpuid(7, 1) - X86.HasAVX512BF16 = isSet(5, eax71) } X86.HasAMXTile = isSet(24, edx7) X86.HasAMXInt8 = isSet(25, edx7) X86.HasAMXBF16 = isSet(22, edx7) + + // These features depend on the second level of extended features. + if eax7 >= 1 { + eax71, _, _, edx71 := cpuid(7, 1) + if X86.HasAVX512 { + X86.HasAVX512BF16 = isSet(5, eax71) + } + if X86.HasAVX { + X86.HasAVXIFMA = isSet(23, eax71) + X86.HasAVXVNNI = isSet(4, eax71) + X86.HasAVXVNNIInt8 = isSet(4, edx71) + } + } } func isSet(bitpos uint, value uint32) bool { diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/parse.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/parse.go index 762b63d68..56a7e1a17 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/parse.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/cpu/parse.go @@ -13,7 +13,7 @@ import "strconv" // https://golang.org/cl/209597. func parseRelease(rel string) (major, minor, patch int, ok bool) { // Strip anything after a dash or plus. - for i := 0; i < len(rel); i++ { + for i := range len(rel) { if rel[i] == '-' || rel[i] == '+' { rel = rel[:i] break @@ -21,7 +21,7 @@ func parseRelease(rel string) (major, minor, patch int, ok bool) { } next := func() (int, bool) { - for i := 0; i < len(rel); i++ { + for i := range len(rel) { if rel[i] == '.' { ver, err := strconv.Atoi(rel[:i]) rel = rel[i+1:] diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/auxv.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/auxv.go new file mode 100644 index 000000000..37a82528f --- /dev/null +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/auxv.go @@ -0,0 +1,36 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import ( + "syscall" + "unsafe" +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs. +// The returned slice is always a fresh copy, owned by the caller. +// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed, +// which happens in some locked-down environments and build modes. +func Auxv() ([][2]uintptr, error) { + vec := runtime_getAuxv() + vecLen := len(vec) + + if vecLen == 0 { + return nil, syscall.ENOENT + } + + if vecLen%2 != 0 { + return nil, syscall.EINVAL + } + + result := make([]uintptr, vecLen) + copy(result, vec) + return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil +} diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/auxv_unsupported.go new file mode 100644 index 000000000..1200487f2 --- /dev/null +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/auxv_unsupported.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
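A minimal sketch of the new unix.Auxv helper added above, assuming a Go 1.21+ build on one of the supported ELF platforms:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	pairs, err := unix.Auxv()
	if err != nil {
		// Non-ELF platforms, or locked-down environments and build modes where
		// the auxiliary vector is not accessible, return an error.
		fmt.Println("auxv unavailable:", err)
		return
	}
	for _, kv := range pairs {
		// Each entry is a key/value pair from the ELF auxiliary vector.
		fmt.Printf("type=%d value=%#x\n", kv[0], kv[1])
	}
}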
+ +//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import "syscall" + +func Auxv() ([][2]uintptr, error) { + return nil, syscall.ENOTSUP +} diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_darwin.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_darwin.go index 099867dee..798f61ad3 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -602,7 +602,150 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI return } -//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) +// sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) +const minIovec = 8 + +func Readv(fd int, iovs [][]byte) (n int, err error) { + if !darwinKernelVersionMin(11, 0, 0) { + return 0, ENOSYS + } + + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + n, err = readv(fd, iovecs) + readvRacedetect(iovecs, n, err) + return n, err +} + +func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { + if !darwinKernelVersionMin(11, 0, 0) { + return 0, ENOSYS + } + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + n, err = preadv(fd, iovecs, offset) + readvRacedetect(iovecs, n, err) + return n, err +} + +func Writev(fd int, iovs [][]byte) (n int, err error) { + if !darwinKernelVersionMin(11, 0, 0) { + return 0, ENOSYS + } + + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = writev(fd, iovecs) + writevRacedetect(iovecs, n) + return n, err +} + +func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { + if !darwinKernelVersionMin(11, 0, 0) { + return 0, ENOSYS + } + + iovecs := make([]Iovec, 0, minIovec) + iovecs = appendBytes(iovecs, iovs) + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + n, err = pwritev(fd, iovecs, offset) + writevRacedetect(iovecs, n) + return n, err +} + +func appendBytes(vecs []Iovec, bs [][]byte) []Iovec { + for _, b := range bs { + var v Iovec + v.SetLen(len(b)) + if len(b) > 0 { + v.Base = &b[0] + } else { + v.Base = (*byte)(unsafe.Pointer(&_zero)) + } + vecs = append(vecs, v) + } + return vecs +} + +func writevRacedetect(iovecs []Iovec, n int) { + if !raceenabled { + return + } + for i := 0; n > 0 && i < len(iovecs); i++ { + m := int(iovecs[i].Len) + if m > n { + m = n + } + n -= m + if m > 0 { + raceReadRange(unsafe.Pointer(iovecs[i].Base), m) + } + } +} + +func readvRacedetect(iovecs []Iovec, n int, err error) { + if !raceenabled { + return + } + for i := 0; n > 0 && i < len(iovecs); i++ { + m := int(iovecs[i].Len) + if m > n { + m = n + } + n -= m + if m > 0 { + raceWriteRange(unsafe.Pointer(iovecs[i].Base), m) + } + } + if err == nil { + raceAcquire(unsafe.Pointer(&ioSync)) + } +} + +func darwinMajorMinPatch() (maj, min, patch int, err error) { + var un Utsname + err = Uname(&un) + if err != nil { + return + } + + var mmp [3]int + c := 0 +Loop: + for _, b := range un.Release[:] { + switch { + case b >= '0' && b <= '9': + mmp[c] = 10*mmp[c] + int(b-'0') + case b == '.': + c++ + if c > 2 { + return 0, 0, 0, ENOTSUP + } + case b == 0: + break Loop + default: + return 0, 0, 0, ENOTSUP + } + } + if c != 2 { + return 0, 0, 0, 
ENOTSUP + } + return mmp[0], mmp[1], mmp[2], nil +} + +func darwinKernelVersionMin(maj, min, patch int) bool { + actualMaj, actualMin, actualPatch, err := darwinMajorMinPatch() + if err != nil { + return false + } + return actualMaj > maj || actualMaj == maj && (actualMin > min || actualMin == min && actualPatch >= patch) +} + //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) @@ -705,3 +848,7 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI //sys write(fd int, p []byte) (n int, err error) //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) +//sys readv(fd int, iovecs []Iovec) (n int, err error) +//sys preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) +//sys writev(fd int, iovecs []Iovec) (n int, err error) +//sys pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_linux.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_linux.go index 230a94549..4958a6570 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -13,6 +13,7 @@ package unix import ( "encoding/binary" + "slices" "strconv" "syscall" "time" @@ -417,7 +418,7 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) { return nil, 0, EINVAL } sa.raw.Family = AF_UNIX - for i := 0; i < n; i++ { + for i := range n { sa.raw.Path[i] = int8(name[i]) } // length is family (uint16), name, NUL. @@ -507,7 +508,7 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) { psm := (*[2]byte)(unsafe.Pointer(&sa.raw.Psm)) psm[0] = byte(sa.PSM) psm[1] = byte(sa.PSM >> 8) - for i := 0; i < len(sa.Addr); i++ { + for i := range len(sa.Addr) { sa.raw.Bdaddr[i] = sa.Addr[len(sa.Addr)-1-i] } cid := (*[2]byte)(unsafe.Pointer(&sa.raw.Cid)) @@ -589,11 +590,11 @@ func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_CAN sa.raw.Ifindex = int32(sa.Ifindex) rx := (*[4]byte)(unsafe.Pointer(&sa.RxID)) - for i := 0; i < 4; i++ { + for i := range 4 { sa.raw.Addr[i] = rx[i] } tx := (*[4]byte)(unsafe.Pointer(&sa.TxID)) - for i := 0; i < 4; i++ { + for i := range 4 { sa.raw.Addr[i+4] = tx[i] } return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil @@ -618,11 +619,11 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { sa.raw.Family = AF_CAN sa.raw.Ifindex = int32(sa.Ifindex) n := (*[8]byte)(unsafe.Pointer(&sa.Name)) - for i := 0; i < 8; i++ { + for i := range 8 { sa.raw.Addr[i] = n[i] } p := (*[4]byte)(unsafe.Pointer(&sa.PGN)) - for i := 0; i < 4; i++ { + for i := range 4 { sa.raw.Addr[i+8] = p[i] } sa.raw.Addr[12] = sa.Addr @@ -911,7 +912,7 @@ func (sa *SockaddrIUCV) sockaddr() (unsafe.Pointer, _Socklen, error) { // These are EBCDIC encoded by the kernel, but we still need to pad them // with blanks. Initializing with blanks allows the caller to feed in either // a padded or an unpadded string. 
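A minimal sketch of vectored reads via unix.Readv, which the hunk above enables on Darwin (kernel 11.0 or newer); the file path and buffer sizes are arbitrary placeholders:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/hosts") // placeholder path
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	defer f.Close()

	// Scatter the read across two separate buffers with a single syscall.
	// On older Darwin kernels the wrapper above returns ENOSYS, which lands here too.
	a := make([]byte, 16)
	b := make([]byte, 16)
	n, err := unix.Readv(int(f.Fd()), [][]byte{a, b})
	if err != nil {
		fmt.Println("readv failed:", err)
		return
	}
	fmt.Printf("read %d bytes: %q %q\n", n, a, b)
}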
- for i := 0; i < 8; i++ { + for i := range 8 { sa.raw.Nodeid[i] = ' ' sa.raw.User_id[i] = ' ' sa.raw.Name[i] = ' ' @@ -1148,7 +1149,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { var user [8]byte var name [8]byte - for i := 0; i < 8; i++ { + for i := range 8 { user[i] = byte(pp.User_id[i]) name[i] = byte(pp.Name[i]) } @@ -1173,11 +1174,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { Ifindex: int(pp.Ifindex), } name := (*[8]byte)(unsafe.Pointer(&sa.Name)) - for i := 0; i < 8; i++ { + for i := range 8 { name[i] = pp.Addr[i] } pgn := (*[4]byte)(unsafe.Pointer(&sa.PGN)) - for i := 0; i < 4; i++ { + for i := range 4 { pgn[i] = pp.Addr[i+8] } addr := (*[1]byte)(unsafe.Pointer(&sa.Addr)) @@ -1188,11 +1189,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { Ifindex: int(pp.Ifindex), } rx := (*[4]byte)(unsafe.Pointer(&sa.RxID)) - for i := 0; i < 4; i++ { + for i := range 4 { rx[i] = pp.Addr[i] } tx := (*[4]byte)(unsafe.Pointer(&sa.TxID)) - for i := 0; i < 4; i++ { + for i := range 4 { tx[i] = pp.Addr[i+4] } return sa, nil @@ -2216,10 +2217,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { return } for i := 0; n > 0 && i < len(iovecs); i++ { - m := int(iovecs[i].Len) - if m > n { - m = n - } + m := min(int(iovecs[i].Len), n) n -= m if m > 0 { raceWriteRange(unsafe.Pointer(iovecs[i].Base), m) @@ -2270,10 +2268,7 @@ func writevRacedetect(iovecs []Iovec, n int) { return } for i := 0; n > 0 && i < len(iovecs); i++ { - m := int(iovecs[i].Len) - if m > n { - m = n - } + m := min(int(iovecs[i].Len), n) n -= m if m > 0 { raceReadRange(unsafe.Pointer(iovecs[i].Base), m) @@ -2320,12 +2315,7 @@ func isGroupMember(gid int) bool { return false } - for _, g := range groups { - if g == gid { - return true - } - } - return false + return slices.Contains(groups, gid) } func isCapDacOverrideSet() bool { diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_solaris.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_solaris.go index 21974af06..abc395547 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) { func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } + +// Ucred Helpers +// See ucred(3c) and getpeerucred(3c) + +//sys getpeerucred(fd uintptr, ucred *uintptr) (err error) +//sys ucredFree(ucred uintptr) = ucred_free +//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get +//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid +//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid +//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid +//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid +//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid +//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid +//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid + +// Ucred is an opaque struct that holds user credentials. +type Ucred struct { + ucred uintptr +} + +// We need to ensure that ucredFree is called on the underlying ucred +// when the Ucred is garbage collected. 
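A minimal sketch of the ucred helpers being added to syscall_solaris.go here, assuming a Solaris/illumos build of the vendored golang.org/x/sys/unix:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Look up the credentials of the current process via ucred_get(3C).
	// The underlying ucred_t is freed by a finalizer when uc is collected.
	uc, err := unix.UcredGet(os.Getpid())
	if err != nil {
		fmt.Println("ucred_get failed:", err)
		return
	}
	fmt.Println("euid:", uc.Geteuid())
	fmt.Println("egid:", uc.Getegid())
	fmt.Println("pid: ", uc.Getpid())
}

GetPeerUcred works the same way but takes the file descriptor of a connected socket and reports the peer's credentials.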
+func ucredFinalizer(u *Ucred) { + ucredFree(u.ucred) +} + +func GetPeerUcred(fd uintptr) (*Ucred, error) { + var ucred uintptr + err := getpeerucred(fd, &ucred) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func UcredGet(pid int) (*Ucred, error) { + ucred, err := ucredGet(pid) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func (u *Ucred) Geteuid() int { + defer runtime.KeepAlive(u) + return ucredGeteuid(u.ucred) +} + +func (u *Ucred) Getruid() int { + defer runtime.KeepAlive(u) + return ucredGetruid(u.ucred) +} + +func (u *Ucred) Getsuid() int { + defer runtime.KeepAlive(u) + return ucredGetsuid(u.ucred) +} + +func (u *Ucred) Getegid() int { + defer runtime.KeepAlive(u) + return ucredGetegid(u.ucred) +} + +func (u *Ucred) Getrgid() int { + defer runtime.KeepAlive(u) + return ucredGetrgid(u.ucred) +} + +func (u *Ucred) Getsgid() int { + defer runtime.KeepAlive(u) + return ucredGetsgid(u.ucred) +} + +func (u *Ucred) Getpid() int { + defer runtime.KeepAlive(u) + return ucredGetpid(u.ucred) +} diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux.go index 6ebc48b3f..4f432bfe8 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1245,6 +1245,7 @@ const ( FAN_REPORT_DFID_NAME = 0xc00 FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 + FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 @@ -1330,8 +1331,10 @@ const ( FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 + F_CREATED_QUERY = 0x404 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 + F_DUPFD_QUERY = 0x403 F_EXLCK = 0x4 F_GETFD = 0x1 F_GETFL = 0x3 @@ -1551,6 +1554,7 @@ const ( IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e IPPROTO_SCTP = 0x84 + IPPROTO_SMC = 0x100 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 @@ -1623,6 +1627,8 @@ const ( IPV6_UNICAST_IF = 0x4c IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 IP_ADD_SOURCE_MEMBERSHIP = 0x27 @@ -1867,6 +1873,7 @@ const ( MADV_UNMERGEABLE = 0xd MADV_WILLNEED = 0x3 MADV_WIPEONFORK = 0x12 + MAP_DROPPABLE = 0x8 MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 @@ -1967,6 +1974,7 @@ const ( MSG_PEEK = 0x2 MSG_PROXY = 0x10 MSG_RST = 0x1000 + MSG_SOCK_DEVMEM = 0x2000000 MSG_SYN = 0x400 MSG_TRUNC = 0x20 MSG_TRYHARD = 0x4 @@ -2083,6 +2091,7 @@ const ( NFC_ATR_REQ_MAXSIZE = 0x40 NFC_ATR_RES_GB_MAXSIZE = 0x2f NFC_ATR_RES_MAXSIZE = 0x40 + NFC_ATS_MAXSIZE = 0x14 NFC_COMM_ACTIVE = 0x0 NFC_COMM_PASSIVE = 0x1 NFC_DEVICE_NAME_MAXSIZE = 0x8 @@ -2163,6 +2172,7 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_BITWISE_BOOL = 0x0 NFT_CHAIN_FLAGS = 0x7 NFT_CHAIN_MAXNAMELEN = 0x100 NFT_CT_MAX = 0x17 @@ -2491,6 +2501,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SHADOW_STACK_STATUS = 0x4a PR_GET_SPECULATION_CTRL = 0x34 PR_GET_TAGGED_ADDR_CTRL = 0x38 PR_GET_THP_DISABLE = 0x2a @@ -2499,6 +2510,7 @@ const ( PR_GET_TIMING = 
0xd PR_GET_TSC = 0x19 PR_GET_UNALIGN = 0x5 + PR_LOCK_SHADOW_STACK_STATUS = 0x4c PR_MCE_KILL = 0x21 PR_MCE_KILL_CLEAR = 0x0 PR_MCE_KILL_DEFAULT = 0x2 @@ -2525,6 +2537,8 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PMLEN_MASK = 0x7f000000 + PR_PMLEN_SHIFT = 0x18 PR_PPC_DEXCR_CTRL_CLEAR = 0x4 PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 @@ -2592,6 +2606,7 @@ const ( PR_SET_PTRACER = 0x59616d61 PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SHADOW_STACK_STATUS = 0x4b PR_SET_SPECULATION_CTRL = 0x35 PR_SET_SYSCALL_USER_DISPATCH = 0x3b PR_SET_TAGGED_ADDR_CTRL = 0x37 @@ -2602,6 +2617,9 @@ const ( PR_SET_UNALIGN = 0x6 PR_SET_VMA = 0x53564d41 PR_SET_VMA_ANON_NAME = 0x0 + PR_SHADOW_STACK_ENABLE = 0x1 + PR_SHADOW_STACK_PUSH = 0x4 + PR_SHADOW_STACK_WRITE = 0x2 PR_SME_GET_VL = 0x40 PR_SME_SET_VL = 0x3f PR_SME_SET_VL_ONEXEC = 0x40000 @@ -2911,7 +2929,6 @@ const ( RTM_NEWNEXTHOP = 0x68 RTM_NEWNEXTHOPBUCKET = 0x74 RTM_NEWNSID = 0x58 - RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 RTM_NEWROUTE = 0x18 @@ -2920,6 +2937,7 @@ const ( RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c RTM_NEWTUNNEL = 0x78 + RTM_NEWVLAN = 0x70 RTM_NR_FAMILIES = 0x1b RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index c0d45e320..75207613c 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -304,6 +306,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c731d24f0..c68acda53 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -305,6 +307,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 680018a4a..a8c607ab8 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -310,6 +312,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index a63909f30..18563dd8d 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -109,6 +109,7 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + GCS_MAGIC = 0x47435300 HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 @@ -119,6 +120,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -302,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 9b0a2573f..22912cdaa 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -297,6 +299,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 958e6e064..29344eb37 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 50c7f25bd..20d51fb96 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ced21d66d..321b60902 100644 --- 
a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 226c04419..9bacdf1e2 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 3122737cd..c22427261 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -358,6 +360,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index eb5d3467e..6270c8ee1 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -362,6 +364,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e921ebc60..9966c1941 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -362,6 +364,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 
SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 38ba81c55..848e5fcc4 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -294,6 +296,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 71f040097..669b2adb8 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -366,6 +368,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index c44a31332..4834e5751 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -119,6 +119,8 @@ const ( IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -357,6 +359,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c SCM_TIMESTAMPNS = 0x21 + SCM_TS_OPT_ID = 0x5a SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 24b346e1a..813c05b66 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -2512,6 +2512,90 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovecs []Iovec, 
offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_preadv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_writev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pwritev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index ebd213100..fda328582 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -738,6 +738,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readv(SB) +GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB) + +TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_preadv(SB) +GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB) + +TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_writev(SB) +GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB) + +TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwritev(SB) +GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB) + TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat64(SB) GLOBL ·libc_fstat64_trampoline_addr(SB), RODATA, $8 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 824b9c2d5..e6f58f3c6 100644 --- 
a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -2512,6 +2512,90 @@ var libc_munmap_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func readv(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_readv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_preadv_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func writev(fd int, iovecs []Iovec) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs))) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_writev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) { + var _p0 unsafe.Pointer + if len(iovecs) > 0 { + _p0 = unsafe.Pointer(&iovecs[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_pwritev_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstat(fd int, stat *Stat_t) (err error) { _, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 4f178a229..7f8998b90 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -738,6 +738,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8 DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB) +TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_readv(SB) +GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8 +DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB) + +TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_preadv(SB) +GLOBL ·libc_preadv_trampoline_addr(SB), 
RODATA, $8 +DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB) + +TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_writev(SB) +GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB) + +TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_pwritev(SB) +GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8 +DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB) + TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_fstat(SB) GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 829b87feb..c6545413c 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -141,6 +141,16 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" +//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so" +//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so" +//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so" +//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so" +//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so" +//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so" //go:cgo_import_dynamic libc_port_create port_create "libc.so" //go:cgo_import_dynamic libc_port_associate port_associate "libc.so" //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" @@ -280,6 +290,16 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom +//go:linkname procgetpeerucred libc_getpeerucred +//go:linkname procucred_get libc_ucred_get +//go:linkname procucred_geteuid libc_ucred_geteuid +//go:linkname procucred_getegid libc_ucred_getegid +//go:linkname procucred_getruid libc_ucred_getruid +//go:linkname procucred_getrgid libc_ucred_getrgid +//go:linkname procucred_getsuid libc_ucred_getsuid +//go:linkname procucred_getsgid libc_ucred_getsgid +//go:linkname procucred_getpid libc_ucred_getpid +//go:linkname procucred_free libc_ucred_free //go:linkname procport_create libc_port_create //go:linkname procport_associate libc_port_associate //go:linkname procport_dissociate libc_port_dissociate @@ -420,6 +440,16 @@ var ( procgetpeername, procsetsockopt, procrecvfrom, + procgetpeerucred, + procucred_get, + procucred_geteuid, + procucred_getegid, + procucred_getruid, + procucred_getrgid, + procucred_getsuid, + procucred_getsgid, + procucred_getpid, + procucred_free, procport_create, procport_associate, procport_dissociate, @@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getpeerucred(fd uintptr, ucred *uintptr) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0) + if 
e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGet(pid int) (ucred uintptr, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0) + ucred = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGeteuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetegid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetruid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetrgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetpid(ucred uintptr) (pid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredFree(ucred uintptr) { + sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func port_create() (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 524b0820c..c79aaff30 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -458,4 +458,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f485dbf45..5eb450695 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -381,4 +381,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) 
diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 70b35bf3b..05e502974 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -422,4 +422,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 1893e2fe8..38c53ec51 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -325,4 +325,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 16a4017da..31d2e71a1 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -321,4 +321,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 7e567f1ef..f4184a336 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -442,4 +442,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 ) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 38ae55e5e..05b996227 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -372,4 +372,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 ) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 55e92e60a..43a256e9e 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -372,4 +372,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 ) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 60658d6a0..eea5ddfc2 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ 
b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -442,4 +442,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 ) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index e203e8a7e..0d777bfbb 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -449,4 +449,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 5944b97d5..b44636502 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -421,4 +421,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index c66d416da..0c7d21c18 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -421,4 +421,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index a5459e766..840539169 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -326,4 +326,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 01d86825b..fcf1b790d 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -387,4 +387,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 7b703e77c..52d15b5f9 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -400,4 +400,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git 
a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/ztypes_linux.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/ztypes_linux.go index 5537148dc..a46abe647 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -4747,7 +4747,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14c + NL80211_ATTR_MAX = 0x14d NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5519,7 +5519,7 @@ const ( NL80211_MNTR_FLAG_CONTROL = 0x3 NL80211_MNTR_FLAG_COOK_FRAMES = 0x5 NL80211_MNTR_FLAG_FCSFAIL = 0x1 - NL80211_MNTR_FLAG_MAX = 0x6 + NL80211_MNTR_FLAG_MAX = 0x7 NL80211_MNTR_FLAG_OTHER_BSS = 0x4 NL80211_MNTR_FLAG_PLCPFAIL = 0x2 NL80211_MPATH_FLAG_ACTIVE = 0x1 @@ -6174,3 +6174,5 @@ type SockDiagReq struct { Family uint8 Protocol uint8 } + +const RTM_NEWNVLAN = 0x70 diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/registry/key.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/registry/key.go index fd8632444..39aeeb644 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/registry/key.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/registry/key.go @@ -164,7 +164,12 @@ loopItems: func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { var h syscall.Handle var d uint32 - err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + var pathPointer *uint16 + pathPointer, err = syscall.UTF16PtrFromString(path) + if err != nil { + return 0, false, err + } + err = regCreateKeyEx(syscall.Handle(k), pathPointer, 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) if err != nil { return 0, false, err @@ -174,7 +179,11 @@ func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool // DeleteKey deletes the subkey path of key k and its values. func DeleteKey(k Key, path string) error { - return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) + pathPointer, err := syscall.UTF16PtrFromString(path) + if err != nil { + return err + } + return regDeleteKey(syscall.Handle(k), pathPointer) } // A KeyInfo describes the statistics of a key. It is returned by Stat. diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/registry/value.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/registry/value.go index 74db26b94..a1bcbb236 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/registry/value.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/registry/value.go @@ -340,7 +340,11 @@ func (k Key) SetBinaryValue(name string, value []byte) error { // DeleteValue removes a named value from the key k. func (k Key) DeleteValue(name string) error { - return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) + namePointer, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + return regDeleteValue(syscall.Handle(k), namePointer) } // ReadValueNames returns the value names of key k. 
diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/security_windows.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/security_windows.go index b6e1ab76f..a8b0364c7 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/security_windows.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/security_windows.go @@ -1303,7 +1303,10 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DE return nil, err } if absoluteSDSize > 0 { - absoluteSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, absoluteSDSize)[0])) + absoluteSD = new(SECURITY_DESCRIPTOR) + if unsafe.Sizeof(*absoluteSD) < uintptr(absoluteSDSize) { + panic("sizeof(SECURITY_DESCRIPTOR) too small") + } } var ( dacl *ACL @@ -1312,19 +1315,55 @@ func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DE group *SID ) if daclSize > 0 { - dacl = (*ACL)(unsafe.Pointer(&make([]byte, daclSize)[0])) + dacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, daclSize)))) } if saclSize > 0 { - sacl = (*ACL)(unsafe.Pointer(&make([]byte, saclSize)[0])) + sacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, saclSize)))) } if ownerSize > 0 { - owner = (*SID)(unsafe.Pointer(&make([]byte, ownerSize)[0])) + owner = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, ownerSize)))) } if groupSize > 0 { - group = (*SID)(unsafe.Pointer(&make([]byte, groupSize)[0])) + group = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, groupSize)))) } + // We call into Windows via makeAbsoluteSD, which sets up + // pointers within absoluteSD that point to other chunks of memory + // we pass into makeAbsoluteSD, and that happens outside the view of the GC. + // We therefore take some care here to then verify the pointers are as we expect + // and set them explicitly in view of the GC. See https://go.dev/issue/73199. + // TODO: consider weak pointers once Go 1.24 is appropriate. See suggestion in https://go.dev/cl/663575. err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize, dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize) + if err != nil { + // Don't return absoluteSD, which might be partially initialized. + return nil, err + } + // Before using any fields, verify absoluteSD is in the format we expect according to Windows. 
+ // See https://learn.microsoft.com/en-us/windows/win32/secauthz/absolute-and-self-relative-security-descriptors + absControl, _, err := absoluteSD.Control() + if err != nil { + panic("absoluteSD: " + err.Error()) + } + if absControl&SE_SELF_RELATIVE != 0 { + panic("absoluteSD not in absolute format") + } + if absoluteSD.dacl != dacl { + panic("dacl pointer mismatch") + } + if absoluteSD.sacl != sacl { + panic("sacl pointer mismatch") + } + if absoluteSD.owner != owner { + panic("owner pointer mismatch") + } + if absoluteSD.group != group { + panic("group pointer mismatch") + } + absoluteSD.dacl = dacl + absoluteSD.sacl = sacl + absoluteSD.owner = owner + absoluteSD.group = group + return } diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/syscall_windows.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/syscall_windows.go index 4a3254386..640f6b153 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -870,6 +870,7 @@ const socket_error = uintptr(^uint32(0)) //sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom //sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo //sys WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.WSASocketW +//sys WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) [failretval!=0] = ws2_32.WSADuplicateSocketW //sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname //sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname //sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs @@ -1698,8 +1699,9 @@ func NewNTUnicodeString(s string) (*NTUnicodeString, error) { // Slice returns a uint16 slice that aliases the data in the NTUnicodeString. func (s *NTUnicodeString) Slice() []uint16 { - slice := unsafe.Slice(s.Buffer, s.MaximumLength) - return slice[:s.Length] + // Note: this rounds the length down, if it happens + // to (incorrectly) be odd. Probably safer than rounding up. 
+ return unsafe.Slice(s.Buffer, s.MaximumLength/2)[:s.Length/2] } func (s *NTUnicodeString) String() string { diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/types_windows.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/types_windows.go index 9d138de5f..958bcf47a 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/types_windows.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/types_windows.go @@ -1074,6 +1074,7 @@ const ( IP_ADD_MEMBERSHIP = 0xc IP_DROP_MEMBERSHIP = 0xd IP_PKTINFO = 0x13 + IP_MTU_DISCOVER = 0x47 IPV6_V6ONLY = 0x1b IPV6_UNICAST_HOPS = 0x4 @@ -1083,6 +1084,7 @@ const ( IPV6_JOIN_GROUP = 0xc IPV6_LEAVE_GROUP = 0xd IPV6_PKTINFO = 0x13 + IPV6_MTU_DISCOVER = 0x47 MSG_OOB = 0x1 MSG_PEEK = 0x2 @@ -1132,6 +1134,15 @@ const ( WSASYS_STATUS_LEN = 128 ) +// enum PMTUD_STATE from ws2ipdef.h +const ( + IP_PMTUDISC_NOT_SET = 0 + IP_PMTUDISC_DO = 1 + IP_PMTUDISC_DONT = 2 + IP_PMTUDISC_PROBE = 3 + IP_PMTUDISC_MAX = 4 +) + type WSABuf struct { Len uint32 Buf *byte @@ -1146,6 +1157,22 @@ type WSAMsg struct { Flags uint32 } +type WSACMSGHDR struct { + Len uintptr + Level int32 + Type int32 +} + +type IN_PKTINFO struct { + Addr [4]byte + Ifindex uint32 +} + +type IN6_PKTINFO struct { + Addr [16]byte + Ifindex uint32 +} + // Flags for WSASocket const ( WSA_FLAG_OVERLAPPED = 0x01 @@ -2673,6 +2700,8 @@ type CommTimeouts struct { // NTUnicodeString is a UTF-16 string for NT native APIs, corresponding to UNICODE_STRING. type NTUnicodeString struct { + // Note: Length and MaximumLength are in *bytes*, not uint16s. + // They should always be even. Length uint16 MaximumLength uint16 Buffer *uint16 @@ -3601,3 +3630,213 @@ const ( KLF_NOTELLSHELL = 0x00000080 KLF_SETFORPROCESS = 0x00000100 ) + +// Virtual Key codes +// https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes +const ( + VK_LBUTTON = 0x01 + VK_RBUTTON = 0x02 + VK_CANCEL = 0x03 + VK_MBUTTON = 0x04 + VK_XBUTTON1 = 0x05 + VK_XBUTTON2 = 0x06 + VK_BACK = 0x08 + VK_TAB = 0x09 + VK_CLEAR = 0x0C + VK_RETURN = 0x0D + VK_SHIFT = 0x10 + VK_CONTROL = 0x11 + VK_MENU = 0x12 + VK_PAUSE = 0x13 + VK_CAPITAL = 0x14 + VK_KANA = 0x15 + VK_HANGEUL = 0x15 + VK_HANGUL = 0x15 + VK_IME_ON = 0x16 + VK_JUNJA = 0x17 + VK_FINAL = 0x18 + VK_HANJA = 0x19 + VK_KANJI = 0x19 + VK_IME_OFF = 0x1A + VK_ESCAPE = 0x1B + VK_CONVERT = 0x1C + VK_NONCONVERT = 0x1D + VK_ACCEPT = 0x1E + VK_MODECHANGE = 0x1F + VK_SPACE = 0x20 + VK_PRIOR = 0x21 + VK_NEXT = 0x22 + VK_END = 0x23 + VK_HOME = 0x24 + VK_LEFT = 0x25 + VK_UP = 0x26 + VK_RIGHT = 0x27 + VK_DOWN = 0x28 + VK_SELECT = 0x29 + VK_PRINT = 0x2A + VK_EXECUTE = 0x2B + VK_SNAPSHOT = 0x2C + VK_INSERT = 0x2D + VK_DELETE = 0x2E + VK_HELP = 0x2F + VK_LWIN = 0x5B + VK_RWIN = 0x5C + VK_APPS = 0x5D + VK_SLEEP = 0x5F + VK_NUMPAD0 = 0x60 + VK_NUMPAD1 = 0x61 + VK_NUMPAD2 = 0x62 + VK_NUMPAD3 = 0x63 + VK_NUMPAD4 = 0x64 + VK_NUMPAD5 = 0x65 + VK_NUMPAD6 = 0x66 + VK_NUMPAD7 = 0x67 + VK_NUMPAD8 = 0x68 + VK_NUMPAD9 = 0x69 + VK_MULTIPLY = 0x6A + VK_ADD = 0x6B + VK_SEPARATOR = 0x6C + VK_SUBTRACT = 0x6D + VK_DECIMAL = 0x6E + VK_DIVIDE = 0x6F + VK_F1 = 0x70 + VK_F2 = 0x71 + VK_F3 = 0x72 + VK_F4 = 0x73 + VK_F5 = 0x74 + VK_F6 = 0x75 + VK_F7 = 0x76 + VK_F8 = 0x77 + VK_F9 = 0x78 + VK_F10 = 0x79 + VK_F11 = 0x7A + VK_F12 = 0x7B + VK_F13 = 0x7C + VK_F14 = 0x7D + VK_F15 = 0x7E + VK_F16 = 0x7F + VK_F17 = 0x80 + VK_F18 = 0x81 + VK_F19 = 0x82 + VK_F20 = 0x83 + VK_F21 = 0x84 + VK_F22 = 0x85 + VK_F23 = 0x86 + VK_F24 = 0x87 + VK_NUMLOCK = 0x90 + VK_SCROLL = 0x91 + VK_OEM_NEC_EQUAL = 0x92 + VK_OEM_FJ_JISHO = 0x92 + 
VK_OEM_FJ_MASSHOU = 0x93 + VK_OEM_FJ_TOUROKU = 0x94 + VK_OEM_FJ_LOYA = 0x95 + VK_OEM_FJ_ROYA = 0x96 + VK_LSHIFT = 0xA0 + VK_RSHIFT = 0xA1 + VK_LCONTROL = 0xA2 + VK_RCONTROL = 0xA3 + VK_LMENU = 0xA4 + VK_RMENU = 0xA5 + VK_BROWSER_BACK = 0xA6 + VK_BROWSER_FORWARD = 0xA7 + VK_BROWSER_REFRESH = 0xA8 + VK_BROWSER_STOP = 0xA9 + VK_BROWSER_SEARCH = 0xAA + VK_BROWSER_FAVORITES = 0xAB + VK_BROWSER_HOME = 0xAC + VK_VOLUME_MUTE = 0xAD + VK_VOLUME_DOWN = 0xAE + VK_VOLUME_UP = 0xAF + VK_MEDIA_NEXT_TRACK = 0xB0 + VK_MEDIA_PREV_TRACK = 0xB1 + VK_MEDIA_STOP = 0xB2 + VK_MEDIA_PLAY_PAUSE = 0xB3 + VK_LAUNCH_MAIL = 0xB4 + VK_LAUNCH_MEDIA_SELECT = 0xB5 + VK_LAUNCH_APP1 = 0xB6 + VK_LAUNCH_APP2 = 0xB7 + VK_OEM_1 = 0xBA + VK_OEM_PLUS = 0xBB + VK_OEM_COMMA = 0xBC + VK_OEM_MINUS = 0xBD + VK_OEM_PERIOD = 0xBE + VK_OEM_2 = 0xBF + VK_OEM_3 = 0xC0 + VK_OEM_4 = 0xDB + VK_OEM_5 = 0xDC + VK_OEM_6 = 0xDD + VK_OEM_7 = 0xDE + VK_OEM_8 = 0xDF + VK_OEM_AX = 0xE1 + VK_OEM_102 = 0xE2 + VK_ICO_HELP = 0xE3 + VK_ICO_00 = 0xE4 + VK_PROCESSKEY = 0xE5 + VK_ICO_CLEAR = 0xE6 + VK_OEM_RESET = 0xE9 + VK_OEM_JUMP = 0xEA + VK_OEM_PA1 = 0xEB + VK_OEM_PA2 = 0xEC + VK_OEM_PA3 = 0xED + VK_OEM_WSCTRL = 0xEE + VK_OEM_CUSEL = 0xEF + VK_OEM_ATTN = 0xF0 + VK_OEM_FINISH = 0xF1 + VK_OEM_COPY = 0xF2 + VK_OEM_AUTO = 0xF3 + VK_OEM_ENLW = 0xF4 + VK_OEM_BACKTAB = 0xF5 + VK_ATTN = 0xF6 + VK_CRSEL = 0xF7 + VK_EXSEL = 0xF8 + VK_EREOF = 0xF9 + VK_PLAY = 0xFA + VK_ZOOM = 0xFB + VK_NONAME = 0xFC + VK_PA1 = 0xFD + VK_OEM_CLEAR = 0xFE +) + +// Mouse button constants. +// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str +const ( + FROM_LEFT_1ST_BUTTON_PRESSED = 0x0001 + RIGHTMOST_BUTTON_PRESSED = 0x0002 + FROM_LEFT_2ND_BUTTON_PRESSED = 0x0004 + FROM_LEFT_3RD_BUTTON_PRESSED = 0x0008 + FROM_LEFT_4TH_BUTTON_PRESSED = 0x0010 +) + +// Control key state constaints. +// https://docs.microsoft.com/en-us/windows/console/key-event-record-str +// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str +const ( + CAPSLOCK_ON = 0x0080 + ENHANCED_KEY = 0x0100 + LEFT_ALT_PRESSED = 0x0002 + LEFT_CTRL_PRESSED = 0x0008 + NUMLOCK_ON = 0x0020 + RIGHT_ALT_PRESSED = 0x0001 + RIGHT_CTRL_PRESSED = 0x0004 + SCROLLLOCK_ON = 0x0040 + SHIFT_PRESSED = 0x0010 +) + +// Mouse event record event flags. 
+// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str +const ( + MOUSE_MOVED = 0x0001 + DOUBLE_CLICK = 0x0002 + MOUSE_WHEELED = 0x0004 + MOUSE_HWHEELED = 0x0008 +) + +// Input Record Event Types +// https://learn.microsoft.com/en-us/windows/console/input-record-str +const ( + FOCUS_EVENT = 0x0010 + KEY_EVENT = 0x0001 + MENU_EVENT = 0x0008 + MOUSE_EVENT = 0x0002 + WINDOW_BUFFER_SIZE_EVENT = 0x0004 +) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 01c0716c2..a58bc48b8 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -511,6 +511,7 @@ var ( procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") procWSACleanup = modws2_32.NewProc("WSACleanup") + procWSADuplicateSocketW = modws2_32.NewProc("WSADuplicateSocketW") procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") procWSAIoctl = modws2_32.NewProc("WSAIoctl") @@ -4391,6 +4392,14 @@ func WSACleanup() (err error) { return } +func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) { + r1, _, e1 := syscall.Syscall(procWSADuplicateSocketW.Addr(), 3, uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info))) + if r1 != 0 { + err = errnoErr(e1) + } + return +} + func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) n = int32(r0) diff --git a/src/cmd/linuxkit/vendor/golang.org/x/term/terminal.go b/src/cmd/linuxkit/vendor/golang.org/x/term/terminal.go index f636667fb..14f89470a 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/term/terminal.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/term/terminal.go @@ -44,6 +44,8 @@ type Terminal struct { // bytes, as an index into |line|). If it returns ok=false, the key // press is processed normally. Otherwise it returns a replacement line // and the new cursor position. + // + // This will be disabled during ReadPassword. AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) // Escape contains a pointer to the escape codes for this terminal. @@ -692,6 +694,8 @@ func (t *Terminal) Write(buf []byte) (n int, err error) { // ReadPassword temporarily changes the prompt and reads a password, without // echo, from the terminal. +// +// The AutoCompleteCallback is disabled during this call. 
func (t *Terminal) ReadPassword(prompt string) (line string, err error) { t.lock.Lock() defer t.lock.Unlock() @@ -699,6 +703,11 @@ func (t *Terminal) ReadPassword(prompt string) (line string, err error) { oldPrompt := t.prompt t.prompt = []rune(prompt) t.echo = false + oldAutoCompleteCallback := t.AutoCompleteCallback + t.AutoCompleteCallback = nil + defer func() { + t.AutoCompleteCallback = oldAutoCompleteCallback + }() line, err = t.readLine() diff --git a/src/cmd/linuxkit/vendor/golang.org/x/text/language/parse.go b/src/cmd/linuxkit/vendor/golang.org/x/text/language/parse.go index 4d57222e7..053336e28 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/text/language/parse.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/text/language/parse.go @@ -59,7 +59,7 @@ func (c CanonType) Parse(s string) (t Tag, err error) { if changed { tt.RemakeString() } - return makeTag(tt), err + return makeTag(tt), nil } // Compose creates a Tag from individual parts, which may be of type Tag, Base, diff --git a/src/cmd/linuxkit/vendor/golang.org/x/time/rate/rate.go b/src/cmd/linuxkit/vendor/golang.org/x/time/rate/rate.go index 8f6c7f493..794b2e32b 100644 --- a/src/cmd/linuxkit/vendor/golang.org/x/time/rate/rate.go +++ b/src/cmd/linuxkit/vendor/golang.org/x/time/rate/rate.go @@ -85,7 +85,7 @@ func (lim *Limiter) Burst() int { // TokensAt returns the number of tokens available at time t. func (lim *Limiter) TokensAt(t time.Time) float64 { lim.mu.Lock() - _, tokens := lim.advance(t) // does not mutate lim + tokens := lim.advance(t) // does not mutate lim lim.mu.Unlock() return tokens } @@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 { // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { return &Limiter{ - limit: r, - burst: b, + limit: r, + burst: b, + tokens: float64(b), } } @@ -185,7 +186,7 @@ func (r *Reservation) CancelAt(t time.Time) { return } // advance time to now - t, tokens := r.lim.advance(t) + tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { @@ -306,7 +307,7 @@ func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - t, tokens := lim.advance(t) + tokens := lim.advance(t) lim.last = t lim.tokens = tokens @@ -323,7 +324,7 @@ func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - t, tokens := lim.advance(t) + tokens := lim.advance(t) lim.last = t lim.tokens = tokens @@ -344,21 +345,9 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) tokens: n, timeToAct: t, } - } else if lim.limit == 0 { - var ok bool - if lim.burst >= n { - ok = true - lim.burst -= n - } - return Reservation{ - ok: ok, - lim: lim, - tokens: lim.burst, - timeToAct: t, - } } - t, tokens := lim.advance(t) + tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. tokens -= float64(n) @@ -391,10 +380,11 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) return r } -// advance calculates and returns an updated state for lim resulting from the passage of time. +// advance calculates and returns an updated number of tokens for lim +// resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. 
-func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newTokens float64) { last := lim.last if t.Before(last) { last = t @@ -407,7 +397,7 @@ func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { if burst := float64(lim.burst); tokens > burst { tokens = burst } - return t, tokens + return tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration @@ -416,8 +406,15 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration { if limit <= 0 { return InfDuration } - seconds := tokens / float64(limit) - return time.Duration(float64(time.Second) * seconds) + + duration := (tokens / float64(limit)) * float64(time.Second) + + // Cap the duration to the maximum representable int64 value, to avoid overflow. + if duration > float64(math.MaxInt64) { + return InfDuration + } + + return time.Duration(duration) } // tokensFromDuration is a unit conversion function from a time duration to the number of tokens diff --git a/src/cmd/linuxkit/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/src/cmd/linuxkit/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go index e7d3805e3..f388426b0 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -159,14 +159,14 @@ var file_google_api_httpbody_proto_rawDesc = []byte{ 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, - 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x65, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, - 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02, - 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xa2, 0x02, 0x04, 0x47, 0x41, + 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/src/cmd/linuxkit/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/src/cmd/linuxkit/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go index 3e5621827..3cd9a5bb8 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go @@ -80,11 +80,12 @@ type ErrorInfo struct { Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` // Additional structured details about this error. 
// - // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should + // ideally be lowerCamelCase. Also, they must be limited to 64 characters in // length. When identifying the current value of an exceeded limit, the units // should be contained in the key, not the value. For example, rather than - // {"instanceLimit": "100/request"}, should be returned as, - // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of + // `{"instanceLimit": "100/request"}`, should be returned as, + // `{"instanceLimitPerRequest": "100"}`, if the client exceeds the number of // instances that can be created in a single (batch) request. Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } @@ -870,6 +871,16 @@ type BadRequest_FieldViolation struct { Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` // A description of why the request element is bad. Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The reason of the field-level error. This is a constant value that + // identifies the proximate cause of the field-level error. It should + // uniquely identify the type of the FieldViolation within the scope of the + // google.rpc.ErrorInfo.domain. This should be at most 63 + // characters and match a regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, + // which represents UPPER_SNAKE_CASE. + Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"` + // Provides a localized error message for field-level errors that is safe to + // return to the API consumer. + LocalizedMessage *LocalizedMessage `protobuf:"bytes,4,opt,name=localized_message,json=localizedMessage,proto3" json:"localized_message,omitempty"` } func (x *BadRequest_FieldViolation) Reset() { @@ -918,6 +929,20 @@ func (x *BadRequest_FieldViolation) GetDescription() string { return "" } +func (x *BadRequest_FieldViolation) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *BadRequest_FieldViolation) GetLocalizedMessage() *LocalizedMessage { + if x != nil { + return x.LocalizedMessage + } + return nil +} + // Describes a URL link. 
type Help_Link struct { state protoimpl.MessageState @@ -1026,51 +1051,57 @@ var file_google_rpc_error_details_proto_rawDesc = []byte{ 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa8, 0x01, 0x0a, 0x0a, 0x42, 0x61, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a, 0x0a, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x48, 0x0a, 0x0e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, 0x65, - 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, - 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, - 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, - 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, - 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, - 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, - 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, - 
0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, - 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, - 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, - 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, - 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, - 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, - 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01, 0x0a, 0x0e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, + 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x49, 0x0a, + 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, + 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a, 0x0c, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x18, 0x03, 
0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x6f, 0x0a, 0x04, + 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, + 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, + 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, + 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x44, 0x0a, + 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, + 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02, 0x03, 0x52, 0x50, + 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1111,11 +1142,12 @@ var file_google_rpc_error_details_proto_depIdxs = []int32{ 12, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation 13, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation 14, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 9, // 6: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { file_google_rpc_error_details_proto_init() } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/balancer.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/balancer.go index 382ad6941..c9b343c71 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/balancer.go @@ -129,6 +129,13 @@ type State struct { // 
brand new implementation of this interface. For the situations like // testing, the new implementation should embed this interface. This allows // gRPC to add new methods to this interface. +// +// NOTICE: This interface is intended to be implemented by gRPC, or intercepted +// by custom load balancing polices. Users should not need their own complete +// implementation of this interface -- they should always delegate to a +// ClientConn passed to Builder.Build() by embedding it in their +// implementations. An embedded ClientConn must never be nil, or runtime panics +// will occur. type ClientConn interface { // NewSubConn is called by balancer to create a new SubConn. // It doesn't block and wait for the connections to be established. @@ -167,6 +174,17 @@ type ClientConn interface { // // Deprecated: Use the Target field in the BuildOptions instead. Target() string + + // MetricsRecorder provides the metrics recorder that balancers can use to + // record metrics. Balancer implementations which do not register metrics on + // metrics registry and record on them can ignore this method. The returned + // MetricsRecorder is guaranteed to never be nil. + MetricsRecorder() estats.MetricsRecorder + + // EnforceClientConnEmbedding is included to force implementers to embed + // another implementation of this interface, allowing gRPC to add methods + // without breaking users. + internal.EnforceClientConnEmbedding } // BuildOptions contains additional information for Build. @@ -198,10 +216,6 @@ type BuildOptions struct { // same resolver.Target as passed to the resolver. See the documentation for // the resolver.Target type for details about what it contains. Target resolver.Target - // MetricsRecorder is the metrics recorder that balancers can use to record - // metrics. Balancer implementations which do not register metrics on - // metrics registry and record on them can ignore this field. - MetricsRecorder estats.MetricsRecorder } // Builder creates a balancer. diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/base/balancer.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/base/balancer.go index d5ed172ae..4d576876d 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -41,7 +41,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) ba cc: cc, pickerBuilder: bb.pickerBuilder, - subConns: resolver.NewAddressMap(), + subConns: resolver.NewAddressMapV2[balancer.SubConn](), scStates: make(map[balancer.SubConn]connectivity.State), csEvltr: &balancer.ConnectivityStateEvaluator{}, config: bb.config, @@ -65,7 +65,7 @@ type baseBalancer struct { csEvltr *balancer.ConnectivityStateEvaluator state connectivity.State - subConns *resolver.AddressMap + subConns *resolver.AddressMapV2[balancer.SubConn] scStates map[balancer.SubConn]connectivity.State picker balancer.Picker config Config @@ -100,7 +100,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { // Successful resolution; clear resolver error and ensure we return nil. b.resolverErr = nil // addrsSet is the set converted from addrs, it's used for quick lookup of an address. 
- addrsSet := resolver.NewAddressMap() + addrsSet := resolver.NewAddressMapV2[any]() for _, a := range s.ResolverState.Addresses { addrsSet.Set(a, nil) if _, ok := b.subConns.Get(a); !ok { @@ -122,8 +122,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { } } for _, a := range b.subConns.Keys() { - sci, _ := b.subConns.Get(a) - sc := sci.(balancer.SubConn) + sc, _ := b.subConns.Get(a) // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { sc.Shutdown() @@ -173,8 +172,7 @@ func (b *baseBalancer) regeneratePicker() { // Filter out all ready SCs from full subConn map. for _, addr := range b.subConns.Keys() { - sci, _ := b.subConns.Get(addr) - sc := sci.(balancer.SubConn) + sc, _ := b.subConns.Get(addr) if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { readySCs[sc] = SubConnInfo{Address: addr} } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go new file mode 100644 index 000000000..cc606f4da --- /dev/null +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go @@ -0,0 +1,356 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package endpointsharding implements a load balancing policy that manages +// homogeneous child policies each owning a single endpoint. +// +// # Experimental +// +// Notice: This package is EXPERIMENTAL and may be changed or removed in a +// later release. +package endpointsharding + +import ( + "errors" + rand "math/rand/v2" + "sync" + "sync/atomic" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +// ChildState is the balancer state of a child along with the endpoint which +// identifies the child balancer. +type ChildState struct { + Endpoint resolver.Endpoint + State balancer.State + + // Balancer exposes only the ExitIdler interface of the child LB policy. + // Other methods of the child policy are called only by endpointsharding. + Balancer balancer.ExitIdler +} + +// Options are the options to configure the behaviour of the +// endpointsharding balancer. +type Options struct { + // DisableAutoReconnect allows the balancer to keep child balancer in the + // IDLE state until they are explicitly triggered to exit using the + // ChildState obtained from the endpointsharding picker. When set to false, + // the endpointsharding balancer will automatically call ExitIdle on child + // connections that report IDLE. + DisableAutoReconnect bool +} + +// ChildBuilderFunc creates a new balancer with the ClientConn. It has the same +// type as the balancer.Builder.Build method. 
+type ChildBuilderFunc func(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer + +// NewBalancer returns a load balancing policy that manages homogeneous child +// policies each owning a single endpoint. The endpointsharding balancer +// forwards the LoadBalancingConfig in ClientConn state updates to its children. +func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions, childBuilder ChildBuilderFunc, esOpts Options) balancer.Balancer { + es := &endpointSharding{ + cc: cc, + bOpts: opts, + esOpts: esOpts, + childBuilder: childBuilder, + } + es.children.Store(resolver.NewEndpointMap[*balancerWrapper]()) + return es +} + +// endpointSharding is a balancer that wraps child balancers. It creates a child +// balancer with child config for every unique Endpoint received. It updates the +// child states on any update from parent or child. +type endpointSharding struct { + cc balancer.ClientConn + bOpts balancer.BuildOptions + esOpts Options + childBuilder ChildBuilderFunc + + // childMu synchronizes calls to any single child. It must be held for all + // calls into a child. To avoid deadlocks, do not acquire childMu while + // holding mu. + childMu sync.Mutex + children atomic.Pointer[resolver.EndpointMap[*balancerWrapper]] + + // inhibitChildUpdates is set during UpdateClientConnState/ResolverError + // calls (calls to children will each produce an update, only want one + // update). + inhibitChildUpdates atomic.Bool + + // mu synchronizes access to the state stored in balancerWrappers in the + // children field. mu must not be held during calls into a child since + // synchronous calls back from the child may require taking mu, causing a + // deadlock. To avoid deadlocks, do not acquire childMu while holding mu. + mu sync.Mutex +} + +// UpdateClientConnState creates a child for new endpoints and deletes children +// for endpoints that are no longer present. It also updates all the children, +// and sends a single synchronous update of the childrens' aggregated state at +// the end of the UpdateClientConnState operation. If any endpoint has no +// addresses it will ignore that endpoint. Otherwise, returns first error found +// from a child, but fully processes the new update. +func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState) error { + es.childMu.Lock() + defer es.childMu.Unlock() + + es.inhibitChildUpdates.Store(true) + defer func() { + es.inhibitChildUpdates.Store(false) + es.updateState() + }() + var ret error + + children := es.children.Load() + newChildren := resolver.NewEndpointMap[*balancerWrapper]() + + // Update/Create new children. + for _, endpoint := range state.ResolverState.Endpoints { + if _, ok := newChildren.Get(endpoint); ok { + // Endpoint child was already created, continue to avoid duplicate + // update. + continue + } + childBalancer, ok := children.Get(endpoint) + if ok { + // Endpoint attributes may have changed, update the stored endpoint. 
+ es.mu.Lock() + childBalancer.childState.Endpoint = endpoint + es.mu.Unlock() + } else { + childBalancer = &balancerWrapper{ + childState: ChildState{Endpoint: endpoint}, + ClientConn: es.cc, + es: es, + } + childBalancer.childState.Balancer = childBalancer + childBalancer.child = es.childBuilder(childBalancer, es.bOpts) + } + newChildren.Set(endpoint, childBalancer) + if err := childBalancer.updateClientConnStateLocked(balancer.ClientConnState{ + BalancerConfig: state.BalancerConfig, + ResolverState: resolver.State{ + Endpoints: []resolver.Endpoint{endpoint}, + Attributes: state.ResolverState.Attributes, + }, + }); err != nil && ret == nil { + // Return first error found, and always commit full processing of + // updating children. If desired to process more specific errors + // across all endpoints, caller should make these specific + // validations, this is a current limitation for simplicity sake. + ret = err + } + } + // Delete old children that are no longer present. + for _, e := range children.Keys() { + child, _ := children.Get(e) + if _, ok := newChildren.Get(e); !ok { + child.closeLocked() + } + } + es.children.Store(newChildren) + if newChildren.Len() == 0 { + return balancer.ErrBadResolverState + } + return ret +} + +// ResolverError forwards the resolver error to all of the endpointSharding's +// children and sends a single synchronous update of the childStates at the end +// of the ResolverError operation. +func (es *endpointSharding) ResolverError(err error) { + es.childMu.Lock() + defer es.childMu.Unlock() + es.inhibitChildUpdates.Store(true) + defer func() { + es.inhibitChildUpdates.Store(false) + es.updateState() + }() + children := es.children.Load() + for _, child := range children.Values() { + child.resolverErrorLocked(err) + } +} + +func (es *endpointSharding) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) { + // UpdateSubConnState is deprecated. +} + +func (es *endpointSharding) Close() { + es.childMu.Lock() + defer es.childMu.Unlock() + children := es.children.Load() + for _, child := range children.Values() { + child.closeLocked() + } +} + +// updateState updates this component's state. It sends the aggregated state, +// and a picker with round robin behavior with all the child states present if +// needed. +func (es *endpointSharding) updateState() { + if es.inhibitChildUpdates.Load() { + return + } + var readyPickers, connectingPickers, idlePickers, transientFailurePickers []balancer.Picker + + es.mu.Lock() + defer es.mu.Unlock() + + children := es.children.Load() + childStates := make([]ChildState, 0, children.Len()) + + for _, child := range children.Values() { + childState := child.childState + childStates = append(childStates, childState) + childPicker := childState.State.Picker + switch childState.State.ConnectivityState { + case connectivity.Ready: + readyPickers = append(readyPickers, childPicker) + case connectivity.Connecting: + connectingPickers = append(connectingPickers, childPicker) + case connectivity.Idle: + idlePickers = append(idlePickers, childPicker) + case connectivity.TransientFailure: + transientFailurePickers = append(transientFailurePickers, childPicker) + // connectivity.Shutdown shouldn't appear. + } + } + + // Construct the round robin picker based off the aggregated state. Whatever + // the aggregated state, use the pickers present that are currently in that + // state only. 
+ var aggState connectivity.State + var pickers []balancer.Picker + if len(readyPickers) >= 1 { + aggState = connectivity.Ready + pickers = readyPickers + } else if len(connectingPickers) >= 1 { + aggState = connectivity.Connecting + pickers = connectingPickers + } else if len(idlePickers) >= 1 { + aggState = connectivity.Idle + pickers = idlePickers + } else if len(transientFailurePickers) >= 1 { + aggState = connectivity.TransientFailure + pickers = transientFailurePickers + } else { + aggState = connectivity.TransientFailure + pickers = []balancer.Picker{base.NewErrPicker(errors.New("no children to pick from"))} + } // No children (resolver error before valid update). + p := &pickerWithChildStates{ + pickers: pickers, + childStates: childStates, + next: uint32(rand.IntN(len(pickers))), + } + es.cc.UpdateState(balancer.State{ + ConnectivityState: aggState, + Picker: p, + }) +} + +// pickerWithChildStates delegates to the pickers it holds in a round robin +// fashion. It also contains the childStates of all the endpointSharding's +// children. +type pickerWithChildStates struct { + pickers []balancer.Picker + childStates []ChildState + next uint32 +} + +func (p *pickerWithChildStates) Pick(info balancer.PickInfo) (balancer.PickResult, error) { + nextIndex := atomic.AddUint32(&p.next, 1) + picker := p.pickers[nextIndex%uint32(len(p.pickers))] + return picker.Pick(info) +} + +// ChildStatesFromPicker returns the state of all the children managed by the +// endpoint sharding balancer that created this picker. +func ChildStatesFromPicker(picker balancer.Picker) []ChildState { + p, ok := picker.(*pickerWithChildStates) + if !ok { + return nil + } + return p.childStates +} + +// balancerWrapper is a wrapper of a balancer. It ID's a child balancer by +// endpoint, and persists recent child balancer state. +type balancerWrapper struct { + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. + + // child contains the wrapped balancer. Access its methods only through + // methods on balancerWrapper to ensure proper synchronization + child balancer.Balancer + balancer.ClientConn // embed to intercept UpdateState, doesn't deal with SubConns + + es *endpointSharding + + // Access to the following fields is guarded by es.mu. + + childState ChildState + isClosed bool +} + +func (bw *balancerWrapper) UpdateState(state balancer.State) { + bw.es.mu.Lock() + bw.childState.State = state + bw.es.mu.Unlock() + if state.ConnectivityState == connectivity.Idle && !bw.es.esOpts.DisableAutoReconnect { + bw.ExitIdle() + } + bw.es.updateState() +} + +// ExitIdle pings an IDLE child balancer to exit idle in a new goroutine to +// avoid deadlocks due to synchronous balancer state updates. +func (bw *balancerWrapper) ExitIdle() { + if ei, ok := bw.child.(balancer.ExitIdler); ok { + go func() { + bw.es.childMu.Lock() + if !bw.isClosed { + ei.ExitIdle() + } + bw.es.childMu.Unlock() + }() + } +} + +// updateClientConnStateLocked delivers the ClientConnState to the child +// balancer. Callers must hold the child mutex of the parent endpointsharding +// balancer. +func (bw *balancerWrapper) updateClientConnStateLocked(ccs balancer.ClientConnState) error { + return bw.child.UpdateClientConnState(ccs) +} + +// closeLocked closes the child balancer. Callers must hold the child mutext of +// the parent endpointsharding balancer. 
+func (bw *balancerWrapper) closeLocked() { + bw.child.Close() + bw.isClosed = true +} + +func (bw *balancerWrapper) resolverErrorLocked(err error) { + bw.child.ResolverError(err) +} diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go index 2fc0a71f9..494314f23 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go @@ -54,9 +54,18 @@ func init() { balancer.Register(pickfirstBuilder{}) } -// enableHealthListenerKeyType is a unique key type used in resolver attributes -// to indicate whether the health listener usage is enabled. -type enableHealthListenerKeyType struct{} +type ( + // enableHealthListenerKeyType is a unique key type used in resolver + // attributes to indicate whether the health listener usage is enabled. + enableHealthListenerKeyType struct{} + // managedByPickfirstKeyType is an attribute key type to inform Outlier + // Detection that the generic health listener is being used. + // TODO: https://github.com/grpc/grpc-go/issues/7915 - Remove this when + // implementing the dualstack design. This is a hack. Once Dualstack is + // completed, outlier detection will stop sending ejection updates through + // the connectivity listener. + managedByPickfirstKeyType struct{} +) var ( logger = grpclog.Component("pick-first-leaf-lb") @@ -111,9 +120,9 @@ func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) b := &pickfirstBalancer{ cc: cc, target: bo.Target.String(), - metricsRecorder: bo.MetricsRecorder, // ClientConn will always create a Metrics Recorder. + metricsRecorder: cc.MetricsRecorder(), - subConns: resolver.NewAddressMap(), + subConns: resolver.NewAddressMapV2[*scData](), state: connectivity.Connecting, cancelConnectionTimer: func() {}, } @@ -140,6 +149,17 @@ func EnableHealthListener(state resolver.State) resolver.State { return state } +// IsManagedByPickfirst returns whether an address belongs to a SubConn +// managed by the pickfirst LB policy. +// TODO: https://github.com/grpc/grpc-go/issues/7915 - This is a hack to disable +// outlier_detection via the with connectivity listener when using pick_first. +// Once Dualstack changes are complete, all SubConns will be created by +// pick_first and outlier detection will only use the health listener for +// ejection. This hack can then be removed. +func IsManagedByPickfirst(addr resolver.Address) bool { + return addr.BalancerAttributes.Value(managedByPickfirstKeyType{}) != nil +} + type pfConfig struct { serviceconfig.LoadBalancingConfig `json:"-"` @@ -166,6 +186,7 @@ type scData struct { } func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) { + addr.BalancerAttributes = addr.BalancerAttributes.WithValue(managedByPickfirstKeyType{}, true) sd := &scData{ rawConnectivityState: connectivity.Idle, effectiveState: connectivity.Idle, @@ -199,7 +220,7 @@ type pickfirstBalancer struct { // updates. state connectivity.State // scData for active subonns mapped by address. 
- subConns *resolver.AddressMap + subConns *resolver.AddressMapV2[*scData] addressList addressList firstPass bool numTF int @@ -298,7 +319,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState prevAddr := b.addressList.currentAddress() prevSCData, found := b.subConns.Get(prevAddr) prevAddrsCount := b.addressList.size() - isPrevRawConnectivityStateReady := found && prevSCData.(*scData).rawConnectivityState == connectivity.Ready + isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready b.addressList.updateAddrs(newAddrs) // If the previous ready SubConn exists in new address list, @@ -360,21 +381,21 @@ func (b *pickfirstBalancer) startFirstPassLocked() { b.numTF = 0 // Reset the connection attempt record for existing SubConns. for _, sd := range b.subConns.Values() { - sd.(*scData).connectionFailedInFirstPass = false + sd.connectionFailedInFirstPass = false } b.requestConnectionLocked() } func (b *pickfirstBalancer) closeSubConnsLocked() { for _, sd := range b.subConns.Values() { - sd.(*scData).subConn.Shutdown() + sd.subConn.Shutdown() } - b.subConns = resolver.NewAddressMap() + b.subConns = resolver.NewAddressMapV2[*scData]() } // deDupAddresses ensures that each address appears only once in the slice. func deDupAddresses(addrs []resolver.Address) []resolver.Address { - seenAddrs := resolver.NewAddressMap() + seenAddrs := resolver.NewAddressMapV2[*scData]() retAddrs := []resolver.Address{} for _, addr := range addrs { @@ -460,7 +481,7 @@ func addressFamily(address string) ipAddrFamily { // This ensures that the subchannel map accurately reflects the current set of // addresses received from the name resolver. func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) { - newAddrsMap := resolver.NewAddressMap() + newAddrsMap := resolver.NewAddressMapV2[bool]() for _, addr := range newAddrs { newAddrsMap.Set(addr, true) } @@ -470,7 +491,7 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) continue } val, _ := b.subConns.Get(oldAddr) - val.(*scData).subConn.Shutdown() + val.subConn.Shutdown() b.subConns.Delete(oldAddr) } } @@ -479,13 +500,12 @@ func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) // becomes ready, which means that all other subConn must be shutdown. func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) { b.cancelConnectionTimer() - for _, v := range b.subConns.Values() { - sd := v.(*scData) + for _, sd := range b.subConns.Values() { if sd.subConn != selected.subConn { sd.subConn.Shutdown() } } - b.subConns = resolver.NewAddressMap() + b.subConns = resolver.NewAddressMapV2[*scData]() b.subConns.Set(selected.addr, selected) } @@ -518,18 +538,17 @@ func (b *pickfirstBalancer) requestConnectionLocked() { b.subConns.Set(curAddr, sd) } - scd := sd.(*scData) - switch scd.rawConnectivityState { + switch sd.rawConnectivityState { case connectivity.Idle: - scd.subConn.Connect() + sd.subConn.Connect() b.scheduleNextConnectionLocked() return case connectivity.TransientFailure: // The SubConn is being re-used and failed during a previous pass // over the addressList. It has not completed backoff yet. // Mark it as having failed and try the next address. 
- scd.connectionFailedInFirstPass = true - lastErr = scd.lastErr + sd.connectionFailedInFirstPass = true + lastErr = sd.lastErr continue case connectivity.Connecting: // Wait for the connection attempt to complete or the timer to fire @@ -537,7 +556,7 @@ func (b *pickfirstBalancer) requestConnectionLocked() { b.scheduleNextConnectionLocked() return default: - b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", scd.rawConnectivityState) + b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState) return } @@ -732,8 +751,7 @@ func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { } // Connect() has been called on all the SubConns. The first pass can be // ended if all the SubConns have reported a failure. - for _, v := range b.subConns.Values() { - sd := v.(*scData) + for _, sd := range b.subConns.Values() { if !sd.connectionFailedInFirstPass { return } @@ -744,8 +762,7 @@ func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) { Picker: &picker{err: lastErr}, }) // Start re-connecting all the SubConns that are already in IDLE. - for _, v := range b.subConns.Values() { - sd := v.(*scData) + for _, sd := range b.subConns.Values() { if sd.rawConnectivityState == connectivity.Idle { sd.subConn.Connect() } @@ -906,6 +923,5 @@ func (al *addressList) hasNext() bool { // fields that are meaningful to the SubConn. func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool { return a.Addr == b.Addr && a.ServerName == b.ServerName && - a.Attributes.Equal(b.Attributes) && - a.Metadata == b.Metadata + a.Attributes.Equal(b.Attributes) } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go index 80a42d225..35da5d1ec 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -22,12 +22,13 @@ package roundrobin import ( - rand "math/rand/v2" - "sync/atomic" + "fmt" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/balancer/endpointsharding" + "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" "google.golang.org/grpc/grpclog" + internalgrpclog "google.golang.org/grpc/internal/grpclog" ) // Name is the name of round_robin balancer. @@ -35,47 +36,44 @@ const Name = "round_robin" var logger = grpclog.Component("roundrobin") -// newBuilder creates a new roundrobin balancer builder. 
-func newBuilder() balancer.Builder { - return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true}) -} - func init() { - balancer.Register(newBuilder()) + balancer.Register(builder{}) } -type rrPickerBuilder struct{} +type builder struct{} -func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker { - logger.Infof("roundrobinPicker: Build called with info: %v", info) - if len(info.ReadySCs) == 0 { - return base.NewErrPicker(balancer.ErrNoSubConnAvailable) +func (bb builder) Name() string { + return Name +} + +func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + childBuilder := balancer.Get(pickfirstleaf.Name).Build + bal := &rrBalancer{ + cc: cc, + Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}), } - scs := make([]balancer.SubConn, 0, len(info.ReadySCs)) - for sc := range info.ReadySCs { - scs = append(scs, sc) - } - return &rrPicker{ - subConns: scs, - // Start at a random index, as the same RR balancer rebuilds a new - // picker when SubConn states change, and we don't want to apply excess - // load to the first server in the list. - next: uint32(rand.IntN(len(scs))), + bal.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[%p] ", bal)) + bal.logger.Infof("Created") + return bal +} + +type rrBalancer struct { + balancer.Balancer + cc balancer.ClientConn + logger *internalgrpclog.PrefixLogger +} + +func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { + return b.Balancer.UpdateClientConnState(balancer.ClientConnState{ + // Enable the health listener in pickfirst children for client side health + // checks and outlier detection, if configured. + ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState), + }) +} + +func (b *rrBalancer) ExitIdle() { + // Should always be ok, as child is endpoint sharding. + if ei, ok := b.Balancer.(balancer.ExitIdler); ok { + ei.ExitIdle() } } - -type rrPicker struct { - // subConns is the snapshot of the roundrobin balancer when this picker was - // created. The slice is immutable. Each Get() will do a round robin - // selection from it and return the selected SubConn. - subConns []balancer.SubConn - next uint32 -} - -func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - subConnsLen := uint32(len(p.subConns)) - nextIndex := atomic.AddUint32(&p.next, 1) - - sc := p.subConns[nextIndex%subConnsLen] - return balancer.PickResult{SubConn: sc}, nil -} diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/subconn.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/subconn.go index ea27c4fa7..9ee44d4af 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/subconn.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer/subconn.go @@ -44,7 +44,7 @@ import ( // should only use a single address. // // NOTICE: This interface is intended to be implemented by gRPC, or intercepted -// by custom load balancing poilices. Users should not need their own complete +// by custom load balancing polices. Users should not need their own complete // implementation of this interface -- they should always delegate to a SubConn // returned by ClientConn.NewSubConn() by embedding it in their implementations. // An embedded SubConn must never be nil, or runtime panics will occur. 
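Illustrative sketch (an editorial aside, not part of the vendored patch): the hunks above remove BuildOptions.MetricsRecorder and instead add a MetricsRecorder() method to balancer.ClientConn, with internal.EnforceClientConnEmbedding forcing implementers of that interface to embed another implementation. For a downstream custom LB policy built against this grpc-go version, the migration looks roughly like the following; the policy name "example_policy" and the no-op state handling are invented for illustration only.

package main

import (
	"google.golang.org/grpc/balancer"
	estats "google.golang.org/grpc/experimental/stats"
)

type exampleBuilder struct{}

func (exampleBuilder) Name() string { return "example_policy" }

func (exampleBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
	return &exampleBalancer{
		cc: cc,
		// Before this bump the recorder came from BuildOptions.MetricsRecorder;
		// now it is obtained from the ClientConn, which the interface comment
		// above guarantees is never nil.
		recorder: cc.MetricsRecorder(),
	}
}

type exampleBalancer struct {
	cc       balancer.ClientConn
	recorder estats.MetricsRecorder
}

// Minimal no-op Balancer implementation so the sketch satisfies the interface.
func (b *exampleBalancer) UpdateClientConnState(balancer.ClientConnState) error       { return nil }
func (b *exampleBalancer) ResolverError(error)                                        {}
func (b *exampleBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {}
func (b *exampleBalancer) Close()                                                     {}

func main() { balancer.Register(exampleBuilder{}) }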
diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer_wrapper.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer_wrapper.go index 905817b5f..948a21ef6 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer_wrapper.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancer/gracefulswitch" "google.golang.org/grpc/internal/channelz" @@ -34,7 +35,15 @@ import ( "google.golang.org/grpc/status" ) -var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) +var ( + setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address)) + // noOpRegisterHealthListenerFn is used when client side health checking is + // disabled. It sends a single READY update on the registered listener. + noOpRegisterHealthListenerFn = func(_ context.Context, listener func(balancer.SubConnState)) func() { + listener(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + return func() {} + } +) // ccBalancerWrapper sits between the ClientConn and the Balancer. // @@ -51,6 +60,7 @@ var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnSt // It uses the gracefulswitch.Balancer internally to ensure that balancer // switches happen in a graceful manner. type ccBalancerWrapper struct { + internal.EnforceClientConnEmbedding // The following fields are initialized when the wrapper is created and are // read-only afterwards, and therefore can be accessed without a mutex. cc *ClientConn @@ -84,7 +94,6 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { CustomUserAgent: cc.dopts.copts.UserAgent, ChannelzParent: cc.channelz, Target: cc.parsedTarget, - MetricsRecorder: cc.metricsRecorderList, }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, @@ -93,6 +102,10 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { return ccb } +func (ccb *ccBalancerWrapper) MetricsRecorder() stats.MetricsRecorder { + return ccb.cc.metricsRecorderList +} + // updateClientConnState is invoked by grpc to push a ClientConnState update to // the underlying balancer. This is always executed from the serializer, so // it is safe to call into the balancer here. @@ -277,10 +290,17 @@ type healthData struct { // to the LB policy. This is stored to avoid sending updates when the // SubConn has already exited connectivity state READY. connectivityState connectivity.State + // closeHealthProducer stores function to close the ref counted health + // producer. The health producer is automatically closed when the SubConn + // state changes. 
+ closeHealthProducer func() } func newHealthData(s connectivity.State) *healthData { - return &healthData{connectivityState: s} + return &healthData{ + connectivityState: s, + closeHealthProducer: func() {}, + } } // updateState is invoked by grpc to push a subConn state update to the @@ -400,7 +420,7 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) ( } acbw.producersMu.Unlock() } - return pData.producer, grpcsync.OnceFunc(unref) + return pData.producer, sync.OnceFunc(unref) } func (acbw *acBalancerWrapper) closeProducers() { @@ -413,6 +433,37 @@ func (acbw *acBalancerWrapper) closeProducers() { } } +// healthProducerRegisterFn is a type alias for the health producer's function +// for registering listeners. +type healthProducerRegisterFn = func(context.Context, balancer.SubConn, string, func(balancer.SubConnState)) func() + +// healthListenerRegFn returns a function to register a listener for health +// updates. If client side health checks are disabled, the registered listener +// will get a single READY (raw connectivity state) update. +// +// Client side health checking is enabled when all the following +// conditions are satisfied: +// 1. Health checking is not disabled using the dial option. +// 2. The health package is imported. +// 3. The health check config is present in the service config. +func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(balancer.SubConnState)) func() { + if acbw.ccb.cc.dopts.disableHealthCheck { + return noOpRegisterHealthListenerFn + } + regHealthLisFn := internal.RegisterClientHealthCheckListener + if regHealthLisFn == nil { + // The health package is not imported. + return noOpRegisterHealthListenerFn + } + cfg := acbw.ac.cc.healthCheckConfig() + if cfg == nil { + return noOpRegisterHealthListenerFn + } + return func(ctx context.Context, listener func(balancer.SubConnState)) func() { + return regHealthLisFn.(healthProducerRegisterFn)(ctx, acbw, cfg.ServiceName, listener) + } +} + // RegisterHealthListener accepts a health listener from the LB policy. It sends // updates to the health listener as long as the SubConn's connectivity state // doesn't change and a new health listener is not registered. To invalidate @@ -421,6 +472,7 @@ func (acbw *acBalancerWrapper) closeProducers() { func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) { acbw.healthMu.Lock() defer acbw.healthMu.Unlock() + acbw.healthData.closeHealthProducer() // listeners should not be registered when the connectivity state // isn't Ready. This may happen when the balancer registers a listener // after the connectivityState is updated, but before it is notified @@ -436,6 +488,7 @@ func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.Sub return } + registerFn := acbw.healthListenerRegFn() acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { if ctx.Err() != nil || acbw.ccb.balancer == nil { return @@ -443,10 +496,25 @@ func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.Sub // Don't send updates if a new listener is registered. acbw.healthMu.Lock() defer acbw.healthMu.Unlock() - curHD := acbw.healthData - if curHD != hd { + if acbw.healthData != hd { return } - listener(balancer.SubConnState{ConnectivityState: connectivity.Ready}) + // Serialize the health updates from the health producer with + // other calls into the LB policy. 
+ listenerWrapper := func(scs balancer.SubConnState) { + acbw.ccb.serializer.TrySchedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + acbw.healthMu.Lock() + defer acbw.healthMu.Unlock() + if acbw.healthData != hd { + return + } + listener(scs) + }) + } + + hd.closeHealthProducer = registerFn(ctx, listenerWrapper) }) } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index 9e9d08069..825c31795 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.1 +// protoc-gen-go v1.36.5 // protoc v5.27.1 // source: grpc/binlog/v1/binarylog.proto @@ -31,6 +31,7 @@ import ( timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -233,10 +234,7 @@ func (Address_Type) EnumDescriptor() ([]byte, []int) { // Log entry we store in binary logs type GrpcLogEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // The timestamp of the binary log message Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // Uniquely identifies a call. The value must not be 0 in order to disambiguate @@ -255,7 +253,7 @@ type GrpcLogEntry struct { // The logger uses one of the following fields to record the payload, // according to the type of the log entry. // - // Types that are assignable to Payload: + // Types that are valid to be assigned to Payload: // // *GrpcLogEntry_ClientHeader // *GrpcLogEntry_ServerHeader @@ -269,7 +267,9 @@ type GrpcLogEntry struct { // EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in // the case of trailers-only. On server side, peer is always // logged on EVENT_TYPE_CLIENT_HEADER. 
- Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` + Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GrpcLogEntry) Reset() { @@ -337,37 +337,45 @@ func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger { return GrpcLogEntry_LOGGER_UNKNOWN } -func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { - if m != nil { - return m.Payload +func (x *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload { + if x != nil { + return x.Payload } return nil } func (x *GrpcLogEntry) GetClientHeader() *ClientHeader { - if x, ok := x.GetPayload().(*GrpcLogEntry_ClientHeader); ok { - return x.ClientHeader + if x != nil { + if x, ok := x.Payload.(*GrpcLogEntry_ClientHeader); ok { + return x.ClientHeader + } } return nil } func (x *GrpcLogEntry) GetServerHeader() *ServerHeader { - if x, ok := x.GetPayload().(*GrpcLogEntry_ServerHeader); ok { - return x.ServerHeader + if x != nil { + if x, ok := x.Payload.(*GrpcLogEntry_ServerHeader); ok { + return x.ServerHeader + } } return nil } func (x *GrpcLogEntry) GetMessage() *Message { - if x, ok := x.GetPayload().(*GrpcLogEntry_Message); ok { - return x.Message + if x != nil { + if x, ok := x.Payload.(*GrpcLogEntry_Message); ok { + return x.Message + } } return nil } func (x *GrpcLogEntry) GetTrailer() *Trailer { - if x, ok := x.GetPayload().(*GrpcLogEntry_Trailer); ok { - return x.Trailer + if x != nil { + if x, ok := x.Payload.(*GrpcLogEntry_Trailer); ok { + return x.Trailer + } } return nil } @@ -416,10 +424,7 @@ func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {} func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {} type ClientHeader struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // This contains only the metadata from the application. Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` // The name of the RPC method, which looks something like: @@ -433,7 +438,9 @@ type ClientHeader struct { // or : . Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"` // the RPC timeout - Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` + Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ClientHeader) Reset() { @@ -495,12 +502,11 @@ func (x *ClientHeader) GetTimeout() *durationpb.Duration { } type ServerHeader struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // This contains only the metadata from the application. - Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ServerHeader) Reset() { @@ -541,10 +547,7 @@ func (x *ServerHeader) GetMetadata() *Metadata { } type Trailer struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // This contains only the metadata from the application. 
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"` // The gRPC status code. @@ -555,6 +558,8 @@ type Trailer struct { // The value of the 'grpc-status-details-bin' metadata key. If // present, this is always an encoded 'google.rpc.Status' message. StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Trailer) Reset() { @@ -617,15 +622,14 @@ func (x *Trailer) GetStatusDetails() []byte { // Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE type Message struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Length of the message. It may not be the same as the length of the // data field, as the logging payload can be truncated or omitted. Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` // May be truncated or omitted. - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Message) Reset() { @@ -694,11 +698,10 @@ func (x *Message) GetData() []byte { // header is just a normal metadata key. // The pair will not count towards the size limit. type Metadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` unknownFields protoimpl.UnknownFields - - Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Metadata) Reset() { @@ -740,12 +743,11 @@ func (x *Metadata) GetEntry() []*MetadataEntry { // A metadata key value pair type MetadataEntry struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` unknownFields protoimpl.UnknownFields - - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + sizeCache protoimpl.SizeCache } func (x *MetadataEntry) Reset() { @@ -794,14 +796,13 @@ func (x *MetadataEntry) GetValue() []byte { // Address information type Address struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` - Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"` + Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"` // only for TYPE_IPV4 and TYPE_IPV6 - IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` + IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache 
protoimpl.SizeCache } func (x *Address) Reset() { @@ -857,7 +858,7 @@ func (x *Address) GetIpPort() uint32 { var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor -var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ +var file_grpc_binlog_v1_binarylog_proto_rawDesc = string([]byte{ 0x0a, 0x1e, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, @@ -983,16 +984,16 @@ var file_grpc_binlog_v1_binarylog_proto_rawDesc = []byte{ 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x69, 0x6e, 0x61, 0x72, 0x79, 0x6c, 0x6f, 0x67, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once - file_grpc_binlog_v1_binarylog_proto_rawDescData = file_grpc_binlog_v1_binarylog_proto_rawDesc + file_grpc_binlog_v1_binarylog_proto_rawDescData []byte ) func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte { file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() { - file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_binlog_v1_binarylog_proto_rawDescData) + file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc))) }) return file_grpc_binlog_v1_binarylog_proto_rawDescData } @@ -1051,7 +1052,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_binlog_v1_binarylog_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc)), NumEnums: 3, NumMessages: 8, NumExtensions: 0, @@ -1063,7 +1064,6 @@ func file_grpc_binlog_v1_binarylog_proto_init() { MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes, }.Build() File_grpc_binlog_v1_binarylog_proto = out.File - file_grpc_binlog_v1_binarylog_proto_rawDesc = nil file_grpc_binlog_v1_binarylog_proto_goTypes = nil file_grpc_binlog_v1_binarylog_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/clientconn.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/clientconn.go index 4f57b5543..4f350ca56 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/clientconn.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/clientconn.go @@ -118,12 +118,26 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires // NewClient creates a new gRPC "channel" for the target URI provided. No I/O // is performed. Use of the ClientConn for RPCs will automatically cause it to -// connect. Connect may be used to manually create a connection, but for most -// users this is unnecessary. +// connect. The Connect method may be called to manually create a connection, +// but for most users this should be unnecessary. // // The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. e.g. to use dns -// resolver, a "dns:///" prefix should be applied to the target. +// https://github.com/grpc/grpc/blob/master/doc/naming.md. E.g. to use the dns +// name resolver, a "dns:///" prefix may be applied to the target. 
The default +// name resolver will be used if no scheme is detected, or if the parsed scheme +// is not a registered name resolver. The default resolver is "dns" but can be +// overridden using the resolver package's SetDefaultScheme. +// +// Examples: +// +// - "foo.googleapis.com:8080" +// - "dns:///foo.googleapis.com:8080" +// - "dns:///foo.googleapis.com" +// - "dns:///10.0.0.213:8080" +// - "dns:///%5B2001:db8:85a3:8d3:1319:8a2e:370:7348%5D:443" +// - "dns://8.8.8.8/foo.googleapis.com:8080" +// - "dns://8.8.8.8/foo.googleapis.com" +// - "zookeeper://zk.example.com:9900/example_service" // // The DialOptions returned by WithBlock, WithTimeout, // WithReturnConnectionError, and FailOnNonTempDialError are ignored by this @@ -181,7 +195,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig) } - cc.mkp = cc.dopts.copts.KeepaliveParams + cc.keepaliveParams = cc.dopts.copts.KeepaliveParams if err = cc.initAuthority(); err != nil { return nil, err @@ -225,7 +239,12 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { // At the end of this method, we kick the channel out of idle, rather than // waiting for the first rpc. - opts = append([]DialOption{withDefaultScheme("passthrough")}, opts...) + // + // WithLocalDNSResolution dial option in `grpc.Dial` ensures that it + // preserves behavior: when default scheme passthrough is used, skip + // hostname resolution, when "dns" is used for resolution, perform + // resolution on the client. + opts = append([]DialOption{withDefaultScheme("passthrough"), WithLocalDNSResolution()}, opts...) cc, err := NewClient(target, opts...) if err != nil { return nil, err @@ -618,7 +637,7 @@ type ClientConn struct { balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. - mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + keepaliveParams keepalive.ClientParameters // May be updated upon receipt of a GoAway. // firstResolveEvent is used to track whether the name resolver sent us at // least one update. RPCs block on this event. May be accessed without mu // if we know we cannot be asked to enter idle mode while accessing it (e.g. @@ -867,7 +886,13 @@ func (cc *ClientConn) Target() string { return cc.target } -// CanonicalTarget returns the canonical target string of the ClientConn. +// CanonicalTarget returns the canonical target string used when creating cc. +// +// This always has the form "://[authority]/". For example: +// +// - "dns:///example.com:42" +// - "dns://8.8.8.8/example.com:42" +// - "unix:///path/to/socket" func (cc *ClientConn) CanonicalTarget() string { return cc.parsedTarget.String() } @@ -1206,12 +1231,11 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) // adjustParams updates parameters used to create transports upon // receiving a GoAway. 
func (ac *addrConn) adjustParams(r transport.GoAwayReason) { - switch r { - case transport.GoAwayTooManyPings: + if r == transport.GoAwayTooManyPings { v := 2 * ac.dopts.copts.KeepaliveParams.Time ac.cc.mu.Lock() - if v > ac.cc.mkp.Time { - ac.cc.mkp.Time = v + if v > ac.cc.keepaliveParams.Time { + ac.cc.keepaliveParams.Time = v } ac.cc.mu.Unlock() } @@ -1307,7 +1331,7 @@ func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, c ac.mu.Lock() ac.cc.mu.RLock() - ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.dopts.copts.KeepaliveParams = ac.cc.keepaliveParams ac.cc.mu.RUnlock() copts := ac.dopts.copts diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/credentials/tls.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/credentials/tls.go index e163a473d..bd5fe22b6 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/credentials/tls.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/credentials/tls.go @@ -32,6 +32,8 @@ import ( "google.golang.org/grpc/internal/envconfig" ) +const alpnFailureHelpMessage = "If you upgraded from a grpc-go version earlier than 1.67, your TLS connections may have stopped working due to ALPN enforcement. For more details, see: https://github.com/grpc/grpc-go/issues/434" + var logger = grpclog.Component("credentials") // TLSInfo contains the auth information for a TLS authenticated connection. @@ -128,7 +130,7 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon if np == "" { if envconfig.EnforceALPNEnabled { conn.Close() - return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage) } logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName) } @@ -158,7 +160,7 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) if cs.NegotiatedProtocol == "" { if envconfig.EnforceALPNEnabled { conn.Close() - return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property") + return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage) } else if logger.V(2) { logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases") } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/dialoptions.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/dialoptions.go index 7494ae591..405a2ffeb 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/dialoptions.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/dialoptions.go @@ -73,7 +73,7 @@ type dialOptions struct { chainUnaryInts []UnaryClientInterceptor chainStreamInts []StreamClientInterceptor - cp Compressor + compressorV0 Compressor dc Decompressor bs internalbackoff.Strategy block bool @@ -94,6 +94,8 @@ type dialOptions struct { idleTimeout time.Duration defaultScheme string maxCallAttempts int + enableLocalDNSResolution bool // Specifies if target hostnames should be resolved when proxying is enabled. + useProxy bool // Specifies if a server should be connected via proxy. } // DialOption configures how we set up the connection. @@ -256,7 +258,7 @@ func WithCodec(c Codec) DialOption { // Deprecated: use UseCompressor instead. Will be supported throughout 1.x. 
func WithCompressor(cp Compressor) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.cp = cp + o.compressorV0 = cp }) } @@ -377,7 +379,22 @@ func WithInsecure() DialOption { // later release. func WithNoProxy() DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.UseProxy = false + o.useProxy = false + }) +} + +// WithLocalDNSResolution forces local DNS name resolution even when a proxy is +// specified in the environment. By default, the server name is provided +// directly to the proxy as part of the CONNECT handshake. This is ignored if +// WithNoProxy is used. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithLocalDNSResolution() DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.enableLocalDNSResolution = true }) } @@ -428,6 +445,11 @@ func WithTimeout(d time.Duration) DialOption { // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. // +// Note that gRPC by default performs name resolution on the target passed to +// NewClient. To bypass name resolution and cause the target string to be +// passed directly to the dialer here instead, use the "passthrough" resolver +// by specifying it in the target string, e.g. "passthrough:target". +// // Note: All supported releases of Go (as of December 2023) override the OS // defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive // with OS defaults for keepalive time and interval, use a net.Dialer that sets @@ -662,14 +684,15 @@ func defaultDialOptions() dialOptions { copts: transport.ConnectOptions{ ReadBufferSize: defaultReadBufSize, WriteBufferSize: defaultWriteBufSize, - UseProxy: true, UserAgent: grpcUA, BufferPool: mem.DefaultBufferPool(), }, - bs: internalbackoff.DefaultExponential, - idleTimeout: 30 * time.Minute, - defaultScheme: "dns", - maxCallAttempts: defaultMaxCallAttempts, + bs: internalbackoff.DefaultExponential, + idleTimeout: 30 * time.Minute, + defaultScheme: "dns", + maxCallAttempts: defaultMaxCallAttempts, + useProxy: true, + enableLocalDNSResolution: false, } } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 26e16d919..faa59e418 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.35.1 +// protoc-gen-go v1.36.5 // protoc v5.27.1 // source: grpc/health/v1/health.proto @@ -28,6 +28,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) const ( @@ -90,11 +91,10 @@ func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { } type HealthCheckRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` unknownFields protoimpl.UnknownFields - - Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + sizeCache protoimpl.SizeCache } func (x *HealthCheckRequest) Reset() { @@ -135,11 +135,10 @@ func (x *HealthCheckRequest) GetService() string { } type HealthCheckResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` unknownFields protoimpl.UnknownFields - - Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` + sizeCache protoimpl.SizeCache } func (x *HealthCheckResponse) Reset() { @@ -179,9 +178,90 @@ func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { return HealthCheckResponse_UNKNOWN } +type HealthListRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HealthListRequest) Reset() { + *x = HealthListRequest{} + mi := &file_grpc_health_v1_health_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HealthListRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthListRequest) ProtoMessage() {} + +func (x *HealthListRequest) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthListRequest.ProtoReflect.Descriptor instead. +func (*HealthListRequest) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{2} +} + +type HealthListResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // statuses contains all the services and their respective status. 
+ Statuses map[string]*HealthCheckResponse `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HealthListResponse) Reset() { + *x = HealthListResponse{} + mi := &file_grpc_health_v1_health_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HealthListResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthListResponse) ProtoMessage() {} + +func (x *HealthListResponse) ProtoReflect() protoreflect.Message { + mi := &file_grpc_health_v1_health_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthListResponse.ProtoReflect.Descriptor instead. +func (*HealthListResponse) Descriptor() ([]byte, []int) { + return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{3} +} + +func (x *HealthListResponse) GetStatuses() map[string]*HealthCheckResponse { + if x != nil { + return x.Statuses + } + return nil +} + var File_grpc_health_v1_health_proto protoreflect.FileDescriptor -var file_grpc_health_v1_health_proto_rawDesc = []byte{ +var file_grpc_health_v1_health_proto_rawDesc = string([]byte{ 0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a, @@ -199,56 +279,83 @@ var file_grpc_health_v1_health_proto_rawDesc = []byte{ 0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05, - 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, + 0x03, 0x22, 0x13, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xc4, 0x01, 0x0a, 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, + 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4c, 0x0a, + 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x30, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, + 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x1a, 0x60, 0x0a, 0x0d, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x39, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, + 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x32, 0xfd, 0x01, + 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05, 0x43, 0x68, 0x65, 0x63, + 0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, + 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, - 0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x30, 0x01, 0x42, 0x61, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x5f, 0x76, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} + 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x04, 0x4c, 0x69, + 0x73, 0x74, 0x12, 0x21, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, + 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x4c, 0x69, 0x73, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x05, 0x57, 0x61, 0x74, + 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x70, 0x0a, + 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, + 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x2f, 0x67, 0x72, 0x70, 
0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x5f, 0x76, 0x31, 0xa2, + 0x02, 0x0c, 0x47, 0x72, 0x70, 0x63, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x56, 0x31, 0xaa, 0x02, + 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +}) var ( file_grpc_health_v1_health_proto_rawDescOnce sync.Once - file_grpc_health_v1_health_proto_rawDescData = file_grpc_health_v1_health_proto_rawDesc + file_grpc_health_v1_health_proto_rawDescData []byte ) func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { file_grpc_health_v1_health_proto_rawDescOnce.Do(func() { - file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_health_v1_health_proto_rawDescData) + file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc))) }) return file_grpc_health_v1_health_proto_rawDescData } var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 5) var file_grpc_health_v1_health_proto_goTypes = []any{ (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse + (*HealthListRequest)(nil), // 3: grpc.health.v1.HealthListRequest + (*HealthListResponse)(nil), // 4: grpc.health.v1.HealthListResponse + nil, // 5: grpc.health.v1.HealthListResponse.StatusesEntry } var file_grpc_health_v1_health_proto_depIdxs = []int32{ 0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus - 1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest - 1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest - 2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse - 2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse - 3, // [3:5] is the sub-list for method output_type - 1, // [1:3] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name + 5, // 1: grpc.health.v1.HealthListResponse.statuses:type_name -> grpc.health.v1.HealthListResponse.StatusesEntry + 2, // 2: grpc.health.v1.HealthListResponse.StatusesEntry.value:type_name -> grpc.health.v1.HealthCheckResponse + 1, // 3: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest + 3, // 4: grpc.health.v1.Health.List:input_type -> grpc.health.v1.HealthListRequest + 1, // 5: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest + 2, // 6: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse + 4, // 7: grpc.health.v1.Health.List:output_type -> grpc.health.v1.HealthListResponse + 2, // 8: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse + 6, // [6:9] is the sub-list for method output_type + 3, // [3:6] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name } func 
init() { file_grpc_health_v1_health_proto_init() } @@ -260,9 +367,9 @@ func file_grpc_health_v1_health_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_health_v1_health_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc)), NumEnums: 1, - NumMessages: 2, + NumMessages: 5, NumExtensions: 0, NumServices: 1, }, @@ -272,7 +379,6 @@ func file_grpc_health_v1_health_proto_init() { MessageInfos: file_grpc_health_v1_health_proto_msgTypes, }.Build() File_grpc_health_v1_health_proto = out.File - file_grpc_health_v1_health_proto_rawDesc = nil file_grpc_health_v1_health_proto_goTypes = nil file_grpc_health_v1_health_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index f96b8ab49..93136610e 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -37,6 +37,7 @@ const _ = grpc.SupportPackageIsVersion9 const ( Health_Check_FullMethodName = "/grpc.health.v1.Health/Check" + Health_List_FullMethodName = "/grpc.health.v1.Health/List" Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch" ) @@ -55,9 +56,19 @@ type HealthClient interface { // // Clients should set a deadline when calling Check, and can declare the // server unhealthy if they do not receive a timely response. - // - // Check implementations should be idempotent and side effect free. Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) + // List provides a non-atomic snapshot of the health of all the available + // services. + // + // The server may respond with a RESOURCE_EXHAUSTED error if too many services + // exist. + // + // Clients should set a deadline when calling List, and can declare the server + // unhealthy if they do not receive a timely response. + // + // Clients should keep in mind that the list of health services exposed by an + // application can change over the lifetime of the process. + List(ctx context.Context, in *HealthListRequest, opts ...grpc.CallOption) (*HealthListResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current // serving status. It will then subsequently send a new message whenever @@ -94,6 +105,16 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts . return out, nil } +func (c *healthClient) List(ctx context.Context, in *HealthListRequest, opts ...grpc.CallOption) (*HealthListResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(HealthListResponse) + err := c.cc.Invoke(ctx, Health_List_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...) 
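The regenerated health service adds a List RPC next to Check and Watch. A hedged client-side sketch of calling it (the target address is a placeholder, and a server that has not implemented List returns codes.Unimplemented):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	cc, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("grpc.NewClient: %v", err)
	}
	defer cc.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// List returns a non-atomic snapshot of every registered service's status.
	resp, err := healthpb.NewHealthClient(cc).List(ctx, &healthpb.HealthListRequest{})
	if err != nil {
		log.Fatalf("Health.List: %v", err)
	}
	for svc, st := range resp.GetStatuses() {
		fmt.Printf("%q: %s\n", svc, st.GetStatus())
	}
}
```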
@@ -128,9 +149,19 @@ type HealthServer interface { // // Clients should set a deadline when calling Check, and can declare the // server unhealthy if they do not receive a timely response. - // - // Check implementations should be idempotent and side effect free. Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) + // List provides a non-atomic snapshot of the health of all the available + // services. + // + // The server may respond with a RESOURCE_EXHAUSTED error if too many services + // exist. + // + // Clients should set a deadline when calling List, and can declare the server + // unhealthy if they do not receive a timely response. + // + // Clients should keep in mind that the list of health services exposed by an + // application can change over the lifetime of the process. + List(context.Context, *HealthListRequest) (*HealthListResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current // serving status. It will then subsequently send a new message whenever @@ -159,6 +190,9 @@ type UnimplementedHealthServer struct{} func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") } +func (UnimplementedHealthServer) List(context.Context, *HealthListRequest) (*HealthListResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method List not implemented") +} func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error { return status.Errorf(codes.Unimplemented, "method Watch not implemented") } @@ -200,6 +234,24 @@ func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interf return interceptor(ctx, in, info, handler) } +func _Health_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthListRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).List(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Health_List_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).List(ctx, req.(*HealthListRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(HealthCheckRequest) if err := stream.RecvMsg(m); err != nil { @@ -222,6 +274,10 @@ var Health_ServiceDesc = grpc.ServiceDesc{ MethodName: "Check", Handler: _Health_Check_Handler, }, + { + MethodName: "List", + Handler: _Health_List_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/health/producer.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/health/producer.go new file mode 100644 index 000000000..f938e5790 --- /dev/null +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/health/producer.go @@ -0,0 +1,106 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package health + +import ( + "context" + "sync" + + "google.golang.org/grpc" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/status" +) + +func init() { + producerBuilderSingleton = &producerBuilder{} + internal.RegisterClientHealthCheckListener = registerClientSideHealthCheckListener +} + +type producerBuilder struct{} + +var producerBuilderSingleton *producerBuilder + +// Build constructs and returns a producer and its cleanup function. +func (*producerBuilder) Build(cci any) (balancer.Producer, func()) { + p := &healthServiceProducer{ + cc: cci.(grpc.ClientConnInterface), + cancel: func() {}, + } + return p, func() { + p.mu.Lock() + defer p.mu.Unlock() + p.cancel() + } +} + +type healthServiceProducer struct { + // The following fields are initialized at build time and read-only after + // that and therefore do not need to be guarded by a mutex. + cc grpc.ClientConnInterface + + mu sync.Mutex + cancel func() +} + +// registerClientSideHealthCheckListener accepts a listener to provide server +// health state via the health service. +func registerClientSideHealthCheckListener(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func() { + pr, closeFn := sc.GetOrBuildProducer(producerBuilderSingleton) + p := pr.(*healthServiceProducer) + p.mu.Lock() + defer p.mu.Unlock() + p.cancel() + if listener == nil { + return closeFn + } + + ctx, cancel := context.WithCancel(ctx) + p.cancel = cancel + + go p.startHealthCheck(ctx, sc, serviceName, listener) + return closeFn +} + +func (p *healthServiceProducer) startHealthCheck(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) { + newStream := func(method string) (any, error) { + return p.cc.NewStream(ctx, &grpc.StreamDesc{ServerStreams: true}, method) + } + + setConnectivityState := func(state connectivity.State, err error) { + listener(balancer.SubConnState{ + ConnectivityState: state, + ConnectionError: err, + }) + } + + // Call the function through the internal variable as tests use it for + // mocking. 
+ err := internal.HealthCheckFunc(ctx, newStream, setConnectivityState, serviceName) + if err == nil { + return + } + if status.Code(err) == codes.Unimplemented { + logger.Errorf("Subchannel health check is unimplemented at server side, thus health check is disabled for SubConn %p", sc) + } else { + logger.Errorf("Health checking failed for SubConn %p: %v", sc, err) + } +} diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 73bb4c4ee..fbc1ca356 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -109,8 +109,9 @@ func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error return nil, errBalancerClosed } bw := &balancerWrapper{ - builder: builder, - gsb: gsb, + ClientConn: gsb.cc, + builder: builder, + gsb: gsb, lastState: balancer.State{ ConnectivityState: connectivity.Connecting, Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), @@ -293,6 +294,7 @@ func (gsb *Balancer) Close() { // State updates from the wrapped balancer can result in invocation of the // graceful switch logic. type balancerWrapper struct { + balancer.ClientConn balancer.Balancer gsb *Balancer builder balancer.Builder @@ -413,7 +415,3 @@ func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver bw.gsb.mu.Unlock() bw.gsb.cc.UpdateAddresses(sc, addrs) } - -func (bw *balancerWrapper) Target() string { - return bw.gsb.cc.Target() -} diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 6e7dd6b77..cc5713fd9 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -49,12 +49,26 @@ var ( // XDSFallbackSupport is the env variable that controls whether support for // xDS fallback is turned on. If this is unset or is false, only the first // xDS server in the list of server configs will be used. - XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false) + XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true) // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used - // instead of the exiting pickfirst implementation. This can be enabled by + // instead of the exiting pickfirst implementation. This can be disabled by // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST" - // to "true". - NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false) + // to "false". + NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true) + + // XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash + // key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by + // setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the + // implementation of A76 is stable, we will flip the default value to false + // in a subsequent release. A final release will remove this environment + // variable, enabling the new behavior unconditionally. 
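Several envconfig defaults above flip from false to true (xDS fallback, the new pick-first policy, and dual-stack endpoint support in xds.go further down), so the corresponding GRPC_* variables now act as opt-outs rather than opt-ins. The helper below is a standalone sketch of the boolFromEnv semantics those declarations rely on, not code copied from this diff; in the real library the variables are read once at package initialization, so they must be set in the process environment before start-up.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// With a default of true, only an explicit "false" disables the feature;
// with a default of false, only an explicit "true" enables it.
func boolFromEnv(envVar string, def bool) bool {
	if def {
		return !strings.EqualFold(os.Getenv(envVar), "false")
	}
	return strings.EqualFold(os.Getenv(envVar), "true")
}

func main() {
	os.Setenv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", "false")

	fmt.Println(boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true)) // false: explicitly opted out
	fmt.Println(boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", true))          // true: now on by default
}
```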
+ XDSEndpointHashKeyBackwardCompat = boolFromEnv("GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT", true) + + // RingHashSetRequestHashKey is set if the ring hash balancer can get the + // request hash header by setting the "requestHashHeader" field, according + // to gRFC A76. It can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY" to "true". + RingHashSetRequestHashKey = boolFromEnv("GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY", false) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 29f234acb..2eb97f832 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -53,4 +53,14 @@ var ( // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") + + // XDSDualstackEndpointsEnabled is true if gRPC should read the + // "additional addresses" in the xDS endpoint resource. + XDSDualstackEndpointsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_DUALSTACK_ENDPOINTS", true) + + // XDSSystemRootCertsEnabled is true when xDS enabled gRPC clients can use + // the system's default root certificates for TLS certificate validation. + // For more details, see: + // https://github.com/grpc/proposal/blob/master/A82-xds-system-root-certs.md. + XDSSystemRootCertsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_SYSTEM_ROOT_CERTS", false) ) diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go deleted file mode 100644 index 6635f7bca..000000000 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/grpcsync/oncefunc.go +++ /dev/null @@ -1,32 +0,0 @@ -/* - * - * Copyright 2022 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpcsync - -import ( - "sync" -) - -// OnceFunc returns a function wrapping f which ensures f is only executed -// once even if the returned function is executed multiple times. -func OnceFunc(f func()) func() { - var once sync.Once - return func() { - once.Do(f) - } -} diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/internal.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/internal.go index 3afc18134..2ce012cda 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/internal.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/internal.go @@ -31,6 +31,10 @@ import ( var ( // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker + // RegisterClientHealthCheckListener is used to provide a listener for + // updates from the client-side health checking service. 
It returns a + // function that can be called to stop the health producer. + RegisterClientHealthCheckListener any // func(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func() // BalancerUnregister is exported by package balancer to unregister a balancer. BalancerUnregister func(name string) // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by @@ -60,6 +64,9 @@ var ( // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials + // MetricsRecorderForServer returns the MetricsRecorderList derived from a + // server's stats handlers. + MetricsRecorderForServer any // func (*grpc.Server) estats.MetricsRecorder // CanonicalString returns the canonical string of the code defined here: // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. // @@ -147,6 +154,20 @@ var ( // other features, including the CSDS service. NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) + // NewXDSResolverWithPoolForTesting creates a new xDS resolver builder + // using the provided xDS pool instead of creating a new one using the + // bootstrap configuration specified by the supported environment variables. + // The resolver.Builder is meant to be used in conjunction with the + // grpc.WithResolvers DialOption. The resolver.Builder does not take + // ownership of the provided xDS client and it is the responsibility of the + // caller to close the client when no longer required. + // + // Testing Only + // + // This function should ONLY be used for testing and may not work with some + // other features, including the CSDS service. + NewXDSResolverWithPoolForTesting any // func(*xdsclient.Pool) (resolver.Builder, error) + // NewXDSResolverWithClientForTesting creates a new xDS resolver builder // using the provided xDS client instead of creating a new one using the // bootstrap configuration specified by the supported environment variables. @@ -238,6 +259,13 @@ var ( // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for // testing purposes. SetBufferPoolingThresholdForTesting any // func(int) + + // TimeAfterFunc is used to create timers. During tests the function is + // replaced to track allocated timers and fail the test if a timer isn't + // cancelled. + TimeAfterFunc = func(d time.Duration, f func()) Timer { + return time.AfterFunc(d, f) + } ) // HealthChecker defines the signature of the client-side LB channel health @@ -273,3 +301,15 @@ const RLSLoadBalancingPolicyName = "rls_experimental" type EnforceSubConnEmbedding interface { enforceSubConnEmbedding() } + +// EnforceClientConnEmbedding is used to enforce proper ClientConn implementation +// embedding. +type EnforceClientConnEmbedding interface { + enforceClientConnEmbedding() +} + +// Timer is an interface to allow injecting different time.Timer implementations +// during tests. 
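internal.TimeAfterFunc and the Timer interface introduced above give tests a seam for replacing time.AfterFunc so that leaked (never-stopped) timers can be detected. A hedged sketch of that substitution pattern; trackedTimer and the package-level variable here are illustrative stand-ins, not grpc-go names:

```go
package main

import (
	"fmt"
	"time"
)

// Timer mirrors the minimal interface above: callers only ever need Stop.
type Timer interface {
	Stop() bool
}

// TimeAfterFunc is the injection point; production code keeps the default.
var TimeAfterFunc = func(d time.Duration, f func()) Timer {
	return time.AfterFunc(d, f)
}

// trackedTimer is a hypothetical test double recording whether Stop was called.
type trackedTimer struct {
	*time.Timer
	stopped *bool
}

func (t trackedTimer) Stop() bool {
	*t.stopped = true
	return t.Timer.Stop()
}

func main() {
	stopped := false
	TimeAfterFunc = func(d time.Duration, f func()) Timer {
		return trackedTimer{Timer: time.AfterFunc(d, f), stopped: &stopped}
	}

	tm := TimeAfterFunc(time.Hour, func() {})
	tm.Stop()
	fmt.Println("timer stopped:", stopped) // a leak-checking test would fail if this stayed false
}
```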
+type Timer interface { + Stop() bool +} diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/metadata/metadata.go index 900bfb716..c4055bc00 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -97,13 +97,11 @@ func hasNotPrintable(msg string) bool { return false } -// ValidatePair validate a key-value pair with the following rules (the pseudo-header will be skipped) : -// -// - key must contain one or more characters. -// - the characters in the key must be contained in [0-9 a-z _ - .]. -// - if the key ends with a "-bin" suffix, no validation of the corresponding value is performed. -// - the characters in the every value must be printable (in [%x20-%x7E]). -func ValidatePair(key string, vals ...string) error { +// ValidateKey validates a key with the following rules (pseudo-headers are +// skipped): +// - the key must contain one or more characters. +// - the characters in the key must be in [0-9 a-z _ - .]. +func ValidateKey(key string) error { // key should not be empty if key == "" { return fmt.Errorf("there is an empty key in the header") @@ -119,6 +117,20 @@ func ValidatePair(key string, vals ...string) error { return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key) } } + return nil +} + +// ValidatePair validates a key-value pair with the following rules +// (pseudo-header are skipped): +// - the key must contain one or more characters. +// - the characters in the key must be in [0-9 a-z _ - .]. +// - if the key ends with a "-bin" suffix, no validation of the corresponding +// value is performed. +// - the characters in every value must be printable (in [%x20-%x7E]). +func ValidatePair(key string, vals ...string) error { + if err := ValidateKey(key); err != nil { + return err + } if strings.HasSuffix(key, "-bin") { return nil } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go new file mode 100644 index 000000000..1f61f1a49 --- /dev/null +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go @@ -0,0 +1,54 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package proxyattributes contains functions for getting and setting proxy +// attributes like the CONNECT address and user info. +package proxyattributes + +import ( + "net/url" + + "google.golang.org/grpc/resolver" +) + +type keyType string + +const proxyOptionsKey = keyType("grpc.resolver.delegatingresolver.proxyOptions") + +// Options holds the proxy connection details needed during the CONNECT +// handshake. 
+type Options struct { + User *url.Userinfo + ConnectAddr string +} + +// Set returns a copy of addr with opts set in its attributes. +func Set(addr resolver.Address, opts Options) resolver.Address { + addr.Attributes = addr.Attributes.WithValue(proxyOptionsKey, opts) + return addr +} + +// Get returns the Options for the proxy [resolver.Address] and a boolean +// value representing if the attribute is present or not. The returned data +// should not be mutated. +func Get(addr resolver.Address) (Options, bool) { + if a := addr.Attributes.Value(proxyOptionsKey); a != nil { + return a.(Options), true + } + return Options{}, false +} diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go new file mode 100644 index 000000000..20b8fb098 --- /dev/null +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go @@ -0,0 +1,427 @@ +/* + * + * Copyright 2024 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package delegatingresolver implements a resolver capable of resolving both +// target URIs and proxy addresses. +package delegatingresolver + +import ( + "fmt" + "net/http" + "net/url" + "sync" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal/proxyattributes" + "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/internal/transport/networktype" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +var ( + logger = grpclog.Component("delegating-resolver") + // HTTPSProxyFromEnvironment will be overwritten in the tests + HTTPSProxyFromEnvironment = http.ProxyFromEnvironment +) + +// delegatingResolver manages both target URI and proxy address resolution by +// delegating these tasks to separate child resolvers. Essentially, it acts as +// an intermediary between the gRPC ClientConn and the child resolvers. +// +// It implements the [resolver.Resolver] interface. +type delegatingResolver struct { + target resolver.Target // parsed target URI to be resolved + cc resolver.ClientConn // gRPC ClientConn + proxyURL *url.URL // proxy URL, derived from proxy environment and target + + // We do not hold both mu and childMu in the same goroutine. Avoid holding + // both locks when calling into the child, as the child resolver may + // synchronously callback into the channel. + mu sync.Mutex // protects all the fields below + targetResolverState *resolver.State // state of the target resolver + proxyAddrs []resolver.Address // resolved proxy addresses; empty if no proxy is configured + + // childMu serializes calls into child resolvers. It also protects access to + // the following fields. 
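proxyattributes above lets the new delegating resolver annotate a proxy's resolver.Address with the backend it ultimately wants the proxy to CONNECT to, and lets the HTTP/2 transport read that annotation back before dialing. The round trip looks roughly like the sketch below; note that these packages live under grpc's internal/ tree, so the snippet only compiles from within the google.golang.org/grpc module, and the addresses and credentials are placeholders.

```go
package main

import (
	"fmt"
	"net/url"

	"google.golang.org/grpc/internal/proxyattributes" // internal: importable only inside the grpc module
	"google.golang.org/grpc/resolver"
)

func main() {
	// The delegating resolver emits the proxy address, annotated with the
	// CONNECT target and any user info taken from the proxy URL.
	proxyAddr := resolver.Address{Addr: "proxy.corp.example:3128"}
	annotated := proxyattributes.Set(proxyAddr, proxyattributes.Options{
		User:        url.UserPassword("user", "secret"),
		ConnectAddr: "backend.internal:50051",
	})

	// The transport later retrieves the options to drive the CONNECT handshake.
	if opts, ok := proxyattributes.Get(annotated); ok {
		fmt.Printf("dial proxy %s, then CONNECT %s\n", annotated.Addr, opts.ConnectAddr)
	}
}
```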
+ childMu sync.Mutex + targetResolver resolver.Resolver // resolver for the target URI, based on its scheme + proxyResolver resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured +} + +// nopResolver is a resolver that does nothing. +type nopResolver struct{} + +func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {} + +func (nopResolver) Close() {} + +// proxyURLForTarget determines the proxy URL for the given address based on the +// environment. It can return the following: +// - nil URL, nil error: No proxy is configured or the address is excluded +// using the `NO_PROXY` environment variable or if req.URL.Host is +// "localhost" (with or without // a port number) +// - nil URL, non-nil error: An error occurred while retrieving the proxy URL. +// - non-nil URL, nil error: A proxy is configured, and the proxy URL was +// retrieved successfully without any errors. +func proxyURLForTarget(address string) (*url.URL, error) { + req := &http.Request{URL: &url.URL{ + Scheme: "https", + Host: address, + }} + return HTTPSProxyFromEnvironment(req) +} + +// New creates a new delegating resolver that can create up to two child +// resolvers: +// - one to resolve the proxy address specified using the supported +// environment variables. This uses the registered resolver for the "dns" +// scheme. It is lazily built when a target resolver update contains at least +// one TCP address. +// - one to resolve the target URI using the resolver specified by the scheme +// in the target URI or specified by the user using the WithResolvers dial +// option. As a special case, if the target URI's scheme is "dns" and a +// proxy is specified using the supported environment variables, the target +// URI's path portion is used as the resolved address unless target +// resolution is enabled using the dial option. +func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions, targetResolverBuilder resolver.Builder, targetResolutionEnabled bool) (resolver.Resolver, error) { + r := &delegatingResolver{ + target: target, + cc: cc, + proxyResolver: nopResolver{}, + targetResolver: nopResolver{}, + } + + var err error + r.proxyURL, err = proxyURLForTarget(target.Endpoint()) + if err != nil { + return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err) + } + + // proxy is not configured or proxy address excluded using `NO_PROXY` env + // var, so only target resolver is used. + if r.proxyURL == nil { + return targetResolverBuilder.Build(target, cc, opts) + } + + if logger.V(2) { + logger.Infof("Proxy URL detected : %s", r.proxyURL) + } + + // Resolver updates from one child may trigger calls into the other. Block + // updates until the children are initialized. + r.childMu.Lock() + defer r.childMu.Unlock() + // When the scheme is 'dns' and target resolution on client is not enabled, + // resolution should be handled by the proxy, not the client. Therefore, we + // bypass the target resolver and store the unresolved target address. 
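proxyURLForTarget above defers entirely to the standard library's proxy-from-environment logic (HTTPSProxyFromEnvironment is an overridable alias of http.ProxyFromEnvironment). A standalone sketch of what that lookup yields for a few backend hosts; the proxy URL and host names are placeholders, and a nil result means the address is dialed directly, which is exactly the case the resolver skips:

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/url"
	"os"
)

func main() {
	// Must be set before the first ProxyFromEnvironment call, which caches the environment.
	os.Setenv("HTTPS_PROXY", "http://proxy.corp.example:3128")
	os.Setenv("NO_PROXY", "localhost,.internal")

	for _, host := range []string{"api.example.com:443", "backend.internal:50051", "localhost:50051"} {
		req := &http.Request{URL: &url.URL{Scheme: "https", Host: host}}
		proxyURL, err := http.ProxyFromEnvironment(req)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%-25s -> %v\n", host, proxyURL) // nil: excluded by NO_PROXY or no proxy configured
	}
}
```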
+ if target.URL.Scheme == "dns" && !targetResolutionEnabled { + r.targetResolverState = &resolver.State{ + Addresses: []resolver.Address{{Addr: target.Endpoint()}}, + Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}}, + } + r.updateTargetResolverState(*r.targetResolverState) + return r, nil + } + wcc := &wrappingClientConn{ + stateListener: r.updateTargetResolverState, + parent: r, + } + if r.targetResolver, err = targetResolverBuilder.Build(target, wcc, opts); err != nil { + return nil, fmt.Errorf("delegating_resolver: unable to build the resolver for target %s: %v", target, err) + } + return r, nil +} + +// proxyURIResolver creates a resolver for resolving proxy URIs using the "dns" +// scheme. It adjusts the proxyURL to conform to the "dns:///" format and builds +// a resolver with a wrappingClientConn to capture resolved addresses. +func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resolver.Resolver, error) { + proxyBuilder := resolver.Get("dns") + if proxyBuilder == nil { + panic("delegating_resolver: resolver for proxy not found for scheme dns") + } + url := *r.proxyURL + url.Scheme = "dns" + url.Path = "/" + r.proxyURL.Host + url.Host = "" // Clear the Host field to conform to the "dns:///" format + + proxyTarget := resolver.Target{URL: url} + wcc := &wrappingClientConn{ + stateListener: r.updateProxyResolverState, + parent: r, + } + return proxyBuilder.Build(proxyTarget, wcc, opts) +} + +func (r *delegatingResolver) ResolveNow(o resolver.ResolveNowOptions) { + r.childMu.Lock() + defer r.childMu.Unlock() + r.targetResolver.ResolveNow(o) + r.proxyResolver.ResolveNow(o) +} + +func (r *delegatingResolver) Close() { + r.childMu.Lock() + defer r.childMu.Unlock() + r.targetResolver.Close() + r.targetResolver = nil + + r.proxyResolver.Close() + r.proxyResolver = nil +} + +func needsProxyResolver(state *resolver.State) bool { + for _, addr := range state.Addresses { + if !skipProxy(addr) { + return true + } + } + for _, endpoint := range state.Endpoints { + for _, addr := range endpoint.Addresses { + if !skipProxy(addr) { + return true + } + } + } + return false +} + +func skipProxy(address resolver.Address) bool { + // Avoid proxy when network is not tcp. + networkType, ok := networktype.Get(address) + if !ok { + networkType, _ = transport.ParseDialTarget(address.Addr) + } + if networkType != "tcp" { + return true + } + + req := &http.Request{URL: &url.URL{ + Scheme: "https", + Host: address.Addr, + }} + // Avoid proxy when address included in `NO_PROXY` environment variable or + // fails to get the proxy address. + url, err := HTTPSProxyFromEnvironment(req) + if err != nil || url == nil { + return true + } + return false +} + +// updateClientConnStateLocked constructs a combined list of addresses by +// pairing each proxy address with every target address of type TCP. For each +// pair, it creates a new [resolver.Address] using the proxy address and +// attaches the corresponding target address and user info as attributes. Target +// addresses that are not of type TCP are appended to the list as-is. The +// function returns nil if either resolver has not yet provided an update, and +// returns the result of ClientConn.UpdateState once both resolvers have +// provided at least one update. 
+func (r *delegatingResolver) updateClientConnStateLocked() error { + if r.targetResolverState == nil || r.proxyAddrs == nil { + return nil + } + + // If multiple resolved proxy addresses are present, we send only the + // unresolved proxy host and let net.Dial handle the proxy host name + // resolution when creating the transport. Sending all resolved addresses + // would increase the number of addresses passed to the ClientConn and + // subsequently to load balancing (LB) policies like Round Robin, leading + // to additional TCP connections. However, if there's only one resolved + // proxy address, we send it directly, as it doesn't affect the address + // count returned by the target resolver and the address count sent to the + // ClientConn. + var proxyAddr resolver.Address + if len(r.proxyAddrs) == 1 { + proxyAddr = r.proxyAddrs[0] + } else { + proxyAddr = resolver.Address{Addr: r.proxyURL.Host} + } + var addresses []resolver.Address + for _, targetAddr := range (*r.targetResolverState).Addresses { + if skipProxy(targetAddr) { + addresses = append(addresses, targetAddr) + continue + } + addresses = append(addresses, proxyattributes.Set(proxyAddr, proxyattributes.Options{ + User: r.proxyURL.User, + ConnectAddr: targetAddr.Addr, + })) + } + + // For each target endpoint, construct a new [resolver.Endpoint] that + // includes all addresses from all proxy endpoints and the addresses from + // that target endpoint, preserving the number of target endpoints. + var endpoints []resolver.Endpoint + for _, endpt := range (*r.targetResolverState).Endpoints { + var addrs []resolver.Address + for _, targetAddr := range endpt.Addresses { + // Avoid proxy when network is not tcp. + if skipProxy(targetAddr) { + addrs = append(addrs, targetAddr) + continue + } + for _, proxyAddr := range r.proxyAddrs { + addrs = append(addrs, proxyattributes.Set(proxyAddr, proxyattributes.Options{ + User: r.proxyURL.User, + ConnectAddr: targetAddr.Addr, + })) + } + } + endpoints = append(endpoints, resolver.Endpoint{Addresses: addrs}) + } + // Use the targetResolverState for its service config and attributes + // contents. The state update is only sent after both the target and proxy + // resolvers have sent their updates, and curState has been updated with the + // combined addresses. + curState := *r.targetResolverState + curState.Addresses = addresses + curState.Endpoints = endpoints + return r.cc.UpdateState(curState) +} + +// updateProxyResolverState updates the proxy resolver state by storing proxy +// addresses and endpoints, marking the resolver as ready, and triggering a +// state update if both proxy and target resolvers are ready. If the ClientConn +// returns a non-nil error, it calls `ResolveNow()` on the target resolver. It +// is a StateListener function of wrappingClientConn passed to the proxy +// resolver. +func (r *delegatingResolver) updateProxyResolverState(state resolver.State) error { + r.mu.Lock() + defer r.mu.Unlock() + if logger.V(2) { + logger.Infof("Addresses received from proxy resolver: %s", state.Addresses) + } + if len(state.Endpoints) > 0 { + // We expect exactly one address per endpoint because the proxy resolver + // uses "dns" resolution. + r.proxyAddrs = make([]resolver.Address, 0, len(state.Endpoints)) + for _, endpoint := range state.Endpoints { + r.proxyAddrs = append(r.proxyAddrs, endpoint.Addresses...) 
+ } + } else if state.Addresses != nil { + r.proxyAddrs = state.Addresses + } else { + r.proxyAddrs = []resolver.Address{} // ensure proxyAddrs is non-nil to indicate an update has been received + } + err := r.updateClientConnStateLocked() + // Another possible approach was to block until updates are received from + // both resolvers. But this is not used because calling `New()` triggers + // `Build()` for the first resolver, which calls `UpdateState()`. And the + // second resolver hasn't sent an update yet, so it would cause `New()` to + // block indefinitely. + if err != nil { + go func() { + r.childMu.Lock() + defer r.childMu.Unlock() + if r.targetResolver != nil { + r.targetResolver.ResolveNow(resolver.ResolveNowOptions{}) + } + }() + } + return err +} + +// updateTargetResolverState is the StateListener function provided to the +// target resolver via wrappingClientConn. It updates the resolver state and +// marks the target resolver as ready. If the update includes at least one TCP +// address and the proxy resolver has not yet been constructed, it initializes +// the proxy resolver. A combined state update is triggered once both resolvers +// are ready. If all addresses are non-TCP, it proceeds without waiting for the +// proxy resolver. If ClientConn.UpdateState returns a non-nil error, +// ResolveNow() is called on the proxy resolver. +func (r *delegatingResolver) updateTargetResolverState(state resolver.State) error { + r.mu.Lock() + defer r.mu.Unlock() + + if logger.V(2) { + logger.Infof("Addresses received from target resolver: %v", state.Addresses) + } + r.targetResolverState = &state + // If all addresses returned by the target resolver have a non-TCP network + // type, or are listed in the `NO_PROXY` environment variable, do not wait + // for proxy update. + if !needsProxyResolver(r.targetResolverState) { + return r.cc.UpdateState(*r.targetResolverState) + } + + // The proxy resolver may be rebuilt multiple times, specifically each time + // the target resolver sends an update, even if the target resolver is built + // successfully but building the proxy resolver fails. + if len(r.proxyAddrs) == 0 { + go func() { + r.childMu.Lock() + defer r.childMu.Unlock() + if _, ok := r.proxyResolver.(nopResolver); !ok { + return + } + proxyResolver, err := r.proxyURIResolver(resolver.BuildOptions{}) + if err != nil { + r.cc.ReportError(fmt.Errorf("delegating_resolver: unable to build the proxy resolver: %v", err)) + return + } + r.proxyResolver = proxyResolver + }() + } + + err := r.updateClientConnStateLocked() + if err != nil { + go func() { + r.childMu.Lock() + defer r.childMu.Unlock() + if r.proxyResolver != nil { + r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{}) + } + }() + } + return nil +} + +// wrappingClientConn serves as an intermediary between the parent ClientConn +// and the child resolvers created here. It implements the resolver.ClientConn +// interface and is passed in that capacity to the child resolvers. +type wrappingClientConn struct { + // Callback to deliver resolver state updates + stateListener func(state resolver.State) error + parent *delegatingResolver +} + +// UpdateState receives resolver state updates and forwards them to the +// appropriate listener function (either for the proxy or target resolver). +func (wcc *wrappingClientConn) UpdateState(state resolver.State) error { + return wcc.stateListener(state) +} + +// ReportError intercepts errors from the child resolvers and passes them to +// ClientConn. 
+func (wcc *wrappingClientConn) ReportError(err error) { + wcc.parent.cc.ReportError(err) +} + +// NewAddress intercepts the new resolved address from the child resolvers and +// passes them to ClientConn. +func (wcc *wrappingClientConn) NewAddress(addrs []resolver.Address) { + wcc.UpdateState(resolver.State{Addresses: addrs}) +} + +// ParseServiceConfig parses the provided service config and returns an object +// that provides the parsed config. +func (wcc *wrappingClientConn) ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult { + return wcc.parent.cc.ParseServiceConfig(serviceConfigJSON) +} diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/client_stream.go index 8ed347c54..ccc0e017e 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/client_stream.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/client_stream.go @@ -59,7 +59,7 @@ func (s *ClientStream) Read(n int) (mem.BufferSlice, error) { return b, err } -// Close closes the stream and popagates err to any readers. +// Close closes the stream and propagates err to any readers. func (s *ClientStream) Close(err error) { var ( rst bool diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/handler_server.go index d9305a65d..3dea23573 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -498,5 +498,5 @@ func mapRecvMsgError(err error) error { if strings.Contains(err.Error(), "body closed by handler") { return status.Error(codes.Canceled, err.Error()) } - return connectionErrorf(true, err, err.Error()) + return connectionErrorf(true, err, "%s", err.Error()) } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/http2_client.go index f323ab7f4..171e690a3 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -43,6 +43,7 @@ import ( "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" + "google.golang.org/grpc/internal/proxyattributes" istatus "google.golang.org/grpc/internal/status" isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" @@ -153,7 +154,7 @@ type http2Client struct { logger *grpclog.PrefixLogger } -func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, useProxy bool, grpcUA string) (net.Conn, error) { +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, grpcUA string) (net.Conn, error) { address := addr.Addr networkType, ok := networktype.Get(addr) if fn != nil { @@ -175,10 +176,10 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error return fn(ctx, address) } if !ok { - networkType, address = parseDialTarget(address) + networkType, address = ParseDialTarget(address) } - if networkType == "tcp" && useProxy { - return proxyDial(ctx, address, grpcUA) + if opts, present := proxyattributes.Get(addr); 
present { + return proxyDial(ctx, addr, grpcUA, opts) } return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) } @@ -217,7 +218,7 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts // address specific arbitrary data to reach custom dialers and credential handshakers. connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes}) - conn, err := dial(connectCtx, opts.Dialer, addr, opts.UseProxy, opts.UserAgent) + conn, err := dial(connectCtx, opts.Dialer, addr, opts.UserAgent) if err != nil { if opts.FailOnNonTempDialError { return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) @@ -1241,7 +1242,8 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { statusCode = codes.DeadlineExceeded } } - t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) + st := status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode) + t.closeStream(s, st.Err(), false, http2.ErrCodeNo, st, nil, false) } func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { @@ -1389,8 +1391,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error { // the caller. func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason - switch f.ErrCode { - case http2.ErrCodeEnhanceYourCalm: + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { if string(f.DebugData()) == "too_many_pings" { t.goAwayReason = GoAwayTooManyPings } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 0055fddd7..7e53eb173 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -35,6 +35,7 @@ import ( "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcutil" "google.golang.org/grpc/internal/pretty" @@ -564,7 +565,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade t.logger.Infof("Aborting the stream early: %v", errMsg) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 405, + httpStatus: http.StatusMethodNotAllowed, streamID: streamID, contentSubtype: s.contentSubtype, status: status.New(codes.Internal, errMsg), @@ -585,7 +586,7 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade stat = status.New(codes.PermissionDenied, err.Error()) } t.controlBuf.put(&earlyAbortStream{ - httpStatus: 200, + httpStatus: http.StatusOK, streamID: s.id, contentSubtype: s.contentSubtype, status: stat, @@ -598,6 +599,22 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade if len(t.activeStreams) == 1 { t.idle = time.Time{} } + // Start a timer to close the stream on reaching the deadline. + if timeoutSet { + // We need to wait for s.cancel to be updated before calling + // t.closeStream to avoid data races. 
+ cancelUpdated := make(chan struct{}) + timer := internal.TimeAfterFunc(timeout, func() { + <-cancelUpdated + t.closeStream(s, true, http2.ErrCodeCancel, false) + }) + oldCancel := s.cancel + s.cancel = func() { + oldCancel() + timer.Stop() + } + close(cancelUpdated) + } t.mu.Unlock() if channelz.IsOn() { t.channelz.SocketMetrics.StreamsStarted.Add(1) @@ -1274,7 +1291,6 @@ func (t *http2Server) Close(err error) { // deleteStream deletes the stream s from transport's active streams. func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) { - t.mu.Lock() if _, ok := t.activeStreams[s.id]; ok { delete(t.activeStreams, s.id) @@ -1324,7 +1340,10 @@ func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCo // called to interrupt the potential blocking on other goroutines. s.cancel() - s.swapState(streamDone) + oldState := s.swapState(streamDone) + if oldState == streamDone { + return + } t.deleteStream(s, eosReceived) t.controlBuf.put(&cleanupStream{ diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/http_util.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/http_util.go index 3613d7b64..f997f9fdb 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -439,8 +439,8 @@ func getWriteBufferPool(size int) *sync.Pool { return pool } -// parseDialTarget returns the network and address to pass to dialer. -func parseDialTarget(target string) (string, string) { +// ParseDialTarget returns the network and address to pass to dialer. +func ParseDialTarget(target string) (string, string) { net := "tcp" m1 := strings.Index(target, ":") m2 := strings.Index(target, ":/") diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/proxy.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/proxy.go index 54b224436..d77384595 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -30,34 +30,16 @@ import ( "net/url" "google.golang.org/grpc/internal" + "google.golang.org/grpc/internal/proxyattributes" + "google.golang.org/grpc/resolver" ) const proxyAuthHeaderKey = "Proxy-Authorization" -var ( - // The following variable will be overwritten in the tests. - httpProxyFromEnvironment = http.ProxyFromEnvironment -) - -func mapAddress(address string) (*url.URL, error) { - req := &http.Request{ - URL: &url.URL{ - Scheme: "https", - Host: address, - }, - } - url, err := httpProxyFromEnvironment(req) - if err != nil { - return nil, err - } - return url, nil -} - // To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. -// It's possible that this reader reads more than what's need for the response and stores -// those bytes in the buffer. -// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the -// bytes in the buffer. +// It's possible that this reader reads more than what's need for the response +// and stores those bytes in the buffer. bufConn wraps the original net.Conn +// and the bufio.Reader to make sure we don't lose the bytes in the buffer. 
type bufConn struct { net.Conn r io.Reader @@ -72,7 +54,7 @@ func basicAuth(username, password string) string { return base64.StdEncoding.EncodeToString([]byte(auth)) } -func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL, grpcUA string) (_ net.Conn, err error) { +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, grpcUA string, opts proxyattributes.Options) (_ net.Conn, err error) { defer func() { if err != nil { conn.Close() @@ -81,15 +63,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri req := &http.Request{ Method: http.MethodConnect, - URL: &url.URL{Host: backendAddr}, + URL: &url.URL{Host: opts.ConnectAddr}, Header: map[string][]string{"User-Agent": {grpcUA}}, } - if t := proxyURL.User; t != nil { - u := t.Username() - p, _ := t.Password() + if user := opts.User; user != nil { + u := user.Username() + p, _ := user.Password() req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p)) } - if err := sendHTTPRequest(ctx, req, conn); err != nil { return nil, fmt.Errorf("failed to write the HTTP request: %v", err) } @@ -117,28 +98,13 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri return conn, nil } -// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy -// is necessary, dials, does the HTTP CONNECT handshake, and returns the -// connection. -func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { - newAddr := addr - proxyURL, err := mapAddress(addr) +// proxyDial establishes a TCP connection to the specified address and performs an HTTP CONNECT handshake. +func proxyDial(ctx context.Context, addr resolver.Address, grpcUA string, opts proxyattributes.Options) (net.Conn, error) { + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", addr.Addr) if err != nil { return nil, err } - if proxyURL != nil { - newAddr = proxyURL.Host - } - - conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) - if err != nil { - return nil, err - } - if proxyURL == nil { - // proxy is disabled if proxyURL is nil. - return conn, err - } - return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + return doHTTPConnectHandshake(ctx, conn, grpcUA, opts) } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/server_stream.go index a22a90151..cf8da0b52 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/server_stream.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/server_stream.go @@ -35,8 +35,10 @@ type ServerStream struct { *Stream // Embed for common stream functionality. st internalServerTransport - ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) - cancel context.CancelFunc // invoked at the end of stream to cancel ctx. + ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance) + // cancel is invoked at the end of stream to cancel ctx. It also stops the + // timer for monitoring the rpc deadline if configured. + cancel func() // Holds compressor names passed in grpc-accept-encoding metadata from the // client. 
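For readers skimming the server_stream.go and http2_server.go hunks above: ServerStream.cancel changes from a bare context.CancelFunc to a wrapper that also stops the per-stream deadline timer when the stream ends. The following is a minimal, stdlib-only sketch of that wrapping pattern, for illustration only; the names are hypothetical and none of this is grpc-go or LinuxKit code.

	package main

	import (
		"context"
		"fmt"
		"time"
	)

	func main() {
		// A stream context and its cancel function, standing in for ServerStream.ctx/cancel.
		ctx, ctxCancel := context.WithCancel(context.Background())

		// A deadline timer that would otherwise fire and close the stream.
		timer := time.AfterFunc(time.Second, func() { fmt.Println("deadline reached") })

		// Wrap the original cancel so ending the stream also stops the timer.
		cancel := func() {
			ctxCancel()
			timer.Stop()
		}

		cancel()
		<-ctx.Done()
		fmt.Println("context cancelled and timer stopped")
	}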
diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/transport.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/transport.go index 2859b8775..af4a4aeab 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -502,8 +502,6 @@ type ConnectOptions struct { ChannelzParent *channelz.SubChannel // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. MaxHeaderListSize *uint32 - // UseProxy specifies if a proxy should be used. - UseProxy bool // The mem.BufferPool to use when reading/writing to the wire. BufferPool mem.BufferPool } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/picker_wrapper.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/picker_wrapper.go index bdaa2130e..a2d2a798d 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/picker_wrapper.go @@ -123,7 +123,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. if lastPickErr != nil { errStr = "latest balancer error: " + lastPickErr.Error() } else { - errStr = fmt.Sprintf("received context error while waiting for new LB policy update: %s", ctx.Err().Error()) + errStr = fmt.Sprintf("%v while waiting for connections to become ready", ctx.Err()) } switch ctx.Err() { case context.DeadlineExceeded: diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/resolver/map.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/resolver/map.go index ada5b9bb7..c3c15ac96 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/resolver/map.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/resolver/map.go @@ -18,16 +18,28 @@ package resolver -type addressMapEntry struct { +import ( + "encoding/base64" + "sort" + "strings" +) + +type addressMapEntry[T any] struct { addr Address - value any + value T } -// AddressMap is a map of addresses to arbitrary values taking into account +// AddressMap is an AddressMapV2[any]. It will be deleted in an upcoming +// release of grpc-go. +// +// Deprecated: use the generic AddressMapV2 type instead. +type AddressMap = AddressMapV2[any] + +// AddressMapV2 is a map of addresses to arbitrary values taking into account // Attributes. BalancerAttributes are ignored, as are Metadata and Type. // Multiple accesses may not be performed concurrently. Must be created via // NewAddressMap; do not construct directly. -type AddressMap struct { +type AddressMapV2[T any] struct { // The underlying map is keyed by an Address with fields that we don't care // about being set to their zero values. The only fields that we care about // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to @@ -41,23 +53,30 @@ type AddressMap struct { // The value type of the map contains a slice of addresses which match the key // in their `Addr` and `ServerName` fields and contain the corresponding value // associated with them. - m map[Address]addressMapEntryList + m map[Address]addressMapEntryList[T] } func toMapKey(addr *Address) Address { return Address{Addr: addr.Addr, ServerName: addr.ServerName} } -type addressMapEntryList []*addressMapEntry +type addressMapEntryList[T any] []*addressMapEntry[T] -// NewAddressMap creates a new AddressMap. +// NewAddressMap creates a new AddressMapV2[any]. +// +// Deprecated: use the generic NewAddressMapV2 constructor instead. 
func NewAddressMap() *AddressMap { - return &AddressMap{m: make(map[Address]addressMapEntryList)} + return NewAddressMapV2[any]() +} + +// NewAddressMapV2 creates a new AddressMapV2. +func NewAddressMapV2[T any]() *AddressMapV2[T] { + return &AddressMapV2[T]{m: make(map[Address]addressMapEntryList[T])} } // find returns the index of addr in the addressMapEntry slice, or -1 if not // present. -func (l addressMapEntryList) find(addr Address) int { +func (l addressMapEntryList[T]) find(addr Address) int { for i, entry := range l { // Attributes are the only thing to match on here, since `Addr` and // `ServerName` are already equal. @@ -69,28 +88,28 @@ func (l addressMapEntryList) find(addr Address) int { } // Get returns the value for the address in the map, if present. -func (a *AddressMap) Get(addr Address) (value any, ok bool) { +func (a *AddressMapV2[T]) Get(addr Address) (value T, ok bool) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { return entryList[entry].value, true } - return nil, false + return value, false } // Set updates or adds the value to the address in the map. -func (a *AddressMap) Set(addr Address, value any) { +func (a *AddressMapV2[T]) Set(addr Address, value T) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { entryList[entry].value = value return } - a.m[addrKey] = append(entryList, &addressMapEntry{addr: addr, value: value}) + a.m[addrKey] = append(entryList, &addressMapEntry[T]{addr: addr, value: value}) } // Delete removes addr from the map. -func (a *AddressMap) Delete(addr Address) { +func (a *AddressMapV2[T]) Delete(addr Address) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] entry := entryList.find(addr) @@ -107,7 +126,7 @@ func (a *AddressMap) Delete(addr Address) { } // Len returns the number of entries in the map. -func (a *AddressMap) Len() int { +func (a *AddressMapV2[T]) Len() int { ret := 0 for _, entryList := range a.m { ret += len(entryList) @@ -116,7 +135,7 @@ func (a *AddressMap) Len() int { } // Keys returns a slice of all current map keys. -func (a *AddressMap) Keys() []Address { +func (a *AddressMapV2[T]) Keys() []Address { ret := make([]Address, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { @@ -127,8 +146,8 @@ func (a *AddressMap) Keys() []Address { } // Values returns a slice of all current map values. -func (a *AddressMap) Values() []any { - ret := make([]any, 0, a.Len()) +func (a *AddressMapV2[T]) Values() []T { + ret := make([]T, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { ret = append(ret, entry.value) @@ -137,70 +156,65 @@ func (a *AddressMap) Values() []any { return ret } -type endpointNode struct { - addrs map[string]struct{} -} - -// Equal returns whether the unordered set of addrs are the same between the -// endpoint nodes. -func (en *endpointNode) Equal(en2 *endpointNode) bool { - if len(en.addrs) != len(en2.addrs) { - return false - } - for addr := range en.addrs { - if _, ok := en2.addrs[addr]; !ok { - return false - } - } - return true -} - -func toEndpointNode(endpoint Endpoint) endpointNode { - en := make(map[string]struct{}) - for _, addr := range endpoint.Addresses { - en[addr.Addr] = struct{}{} - } - return endpointNode{ - addrs: en, - } -} +type endpointMapKey string // EndpointMap is a map of endpoints to arbitrary values keyed on only the // unordered set of address strings within an endpoint. 
This map is not thread // safe, thus it is unsafe to access concurrently. Must be created via // NewEndpointMap; do not construct directly. -type EndpointMap struct { - endpoints map[*endpointNode]any +type EndpointMap[T any] struct { + endpoints map[endpointMapKey]endpointData[T] +} + +type endpointData[T any] struct { + // decodedKey stores the original key to avoid decoding when iterating on + // EndpointMap keys. + decodedKey Endpoint + value T } // NewEndpointMap creates a new EndpointMap. -func NewEndpointMap() *EndpointMap { - return &EndpointMap{ - endpoints: make(map[*endpointNode]any), +func NewEndpointMap[T any]() *EndpointMap[T] { + return &EndpointMap[T]{ + endpoints: make(map[endpointMapKey]endpointData[T]), } } +// encodeEndpoint returns a string that uniquely identifies the unordered set of +// addresses within an endpoint. +func encodeEndpoint(e Endpoint) endpointMapKey { + addrs := make([]string, 0, len(e.Addresses)) + // base64 encoding the address strings restricts the characters present + // within the strings. This allows us to use a delimiter without the need of + // escape characters. + for _, addr := range e.Addresses { + addrs = append(addrs, base64.StdEncoding.EncodeToString([]byte(addr.Addr))) + } + sort.Strings(addrs) + // " " should not appear in base64 encoded strings. + return endpointMapKey(strings.Join(addrs, " ")) +} + // Get returns the value for the address in the map, if present. -func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { - en := toEndpointNode(e) - if endpoint := em.find(en); endpoint != nil { - return em.endpoints[endpoint], true +func (em *EndpointMap[T]) Get(e Endpoint) (value T, ok bool) { + val, found := em.endpoints[encodeEndpoint(e)] + if found { + return val.value, true } - return nil, false + return value, false } // Set updates or adds the value to the address in the map. -func (em *EndpointMap) Set(e Endpoint, value any) { - en := toEndpointNode(e) - if endpoint := em.find(en); endpoint != nil { - em.endpoints[endpoint] = value - return +func (em *EndpointMap[T]) Set(e Endpoint, value T) { + en := encodeEndpoint(e) + em.endpoints[en] = endpointData[T]{ + decodedKey: Endpoint{Addresses: e.Addresses}, + value: value, } - em.endpoints[&en] = value } // Len returns the number of entries in the map. -func (em *EndpointMap) Len() int { +func (em *EndpointMap[T]) Len() int { return len(em.endpoints) } @@ -209,43 +223,25 @@ func (em *EndpointMap) Len() int { // the unordered set of addresses. Thus, endpoint information returned is not // the full endpoint data (drops duplicated addresses and attributes) but can be // used for EndpointMap accesses. -func (em *EndpointMap) Keys() []Endpoint { +func (em *EndpointMap[T]) Keys() []Endpoint { ret := make([]Endpoint, 0, len(em.endpoints)) - for en := range em.endpoints { - var endpoint Endpoint - for addr := range en.addrs { - endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) - } - ret = append(ret, endpoint) + for _, en := range em.endpoints { + ret = append(ret, en.decodedKey) } return ret } // Values returns a slice of all current map values. -func (em *EndpointMap) Values() []any { - ret := make([]any, 0, len(em.endpoints)) +func (em *EndpointMap[T]) Values() []T { + ret := make([]T, 0, len(em.endpoints)) for _, val := range em.endpoints { - ret = append(ret, val) + ret = append(ret, val.value) } return ret } -// find returns a pointer to the endpoint node in em if the endpoint node is -// already present. If not found, nil is returned. 
The comparisons are done on -// the unordered set of addresses within an endpoint. -func (em EndpointMap) find(e endpointNode) *endpointNode { - for endpoint := range em.endpoints { - if e.Equal(endpoint) { - return endpoint - } - } - return nil -} - // Delete removes the specified endpoint from the map. -func (em *EndpointMap) Delete(e Endpoint) { - en := toEndpointNode(e) - if entry := em.find(en); entry != nil { - delete(em.endpoints, entry) - } +func (em *EndpointMap[T]) Delete(e Endpoint) { + en := encodeEndpoint(e) + delete(em.endpoints, en) } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/resolver/resolver.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/resolver/resolver.go index 8eb1cf3bc..b84ef26d4 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/resolver/resolver.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/internal" "google.golang.org/grpc/serviceconfig" ) @@ -175,6 +176,8 @@ type BuildOptions struct { // Authority is the effective authority of the clientconn for which the // resolver is built. Authority string + // MetricsRecorder is the metrics recorder to do recording. + MetricsRecorder stats.MetricsRecorder } // An Endpoint is one network endpoint, or server, which may have multiple diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/resolver_wrapper.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/resolver_wrapper.go index 23bb3fb25..80e16a327 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/resolver_wrapper.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -26,6 +26,7 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/internal/resolver/delegatingresolver" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -76,9 +77,19 @@ func (ccr *ccResolverWrapper) start() error { CredsBundle: ccr.cc.dopts.copts.CredsBundle, Dialer: ccr.cc.dopts.copts.Dialer, Authority: ccr.cc.authority, + MetricsRecorder: ccr.cc.metricsRecorderList, } var err error - ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + // The delegating resolver is used unless: + // - A custom dialer is provided via WithContextDialer dialoption or + // - Proxy usage is disabled through WithNoProxy dialoption. + // In these cases, the resolver is built based on the scheme of target, + // using the appropriate resolver builder. 
+ if ccr.cc.dopts.copts.Dialer != nil || !ccr.cc.dopts.useProxy { + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + } else { + ccr.resolver, err = delegatingresolver.New(ccr.cc.parsedTarget, ccr, opts, ccr.cc.resolverBuilder, ccr.cc.dopts.enableLocalDNSResolution) + } errCh <- err }) return <-errCh @@ -123,12 +134,7 @@ func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { return nil } if s.Endpoints == nil { - s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) - for _, a := range s.Addresses { - ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} - ep.Addresses[0].BalancerAttributes = nil - s.Endpoints = append(s.Endpoints, ep) - } + s.Endpoints = addressesToEndpoints(s.Addresses) } ccr.addChannelzTraceEvent(s) ccr.curState = s @@ -161,7 +167,11 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { ccr.cc.mu.Unlock() return } - s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + s := resolver.State{ + Addresses: addrs, + ServiceConfig: ccr.curState.ServiceConfig, + Endpoints: addressesToEndpoints(addrs), + } ccr.addChannelzTraceEvent(s) ccr.curState = s ccr.mu.Unlock() @@ -199,3 +209,13 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } + +func addressesToEndpoints(addrs []resolver.Address) []resolver.Endpoint { + endpoints := make([]resolver.Endpoint, 0, len(addrs)) + for _, a := range addrs { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + endpoints = append(endpoints, ep) + } + return endpoints +} diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/rpc_util.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/rpc_util.go index 9fac2b08b..ad20e9dff 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/rpc_util.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/rpc_util.go @@ -151,7 +151,7 @@ func (d *gzipDecompressor) Type() string { // callInfo contains all related configuration and information about an RPC. type callInfo struct { - compressorType string + compressorName string failFast bool maxReceiveMessageSize *int maxSendMessageSize *int @@ -222,7 +222,7 @@ type HeaderCallOption struct { func (o HeaderCallOption) before(*callInfo) error { return nil } func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) { - *o.HeaderAddr, _ = attempt.s.Header() + *o.HeaderAddr, _ = attempt.transportStream.Header() } // Trailer returns a CallOptions that retrieves the trailer metadata @@ -244,7 +244,7 @@ type TrailerCallOption struct { func (o TrailerCallOption) before(*callInfo) error { return nil } func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) { - *o.TrailerAddr = attempt.s.Trailer() + *o.TrailerAddr = attempt.transportStream.Trailer() } // Peer returns a CallOption that retrieves peer information for a unary RPC. 
@@ -266,7 +266,7 @@ type PeerCallOption struct { func (o PeerCallOption) before(*callInfo) error { return nil } func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) { - if x, ok := peer.FromContext(attempt.s.Context()); ok { + if x, ok := peer.FromContext(attempt.transportStream.Context()); ok { *o.PeerAddr = *x } } @@ -435,7 +435,7 @@ type CompressorCallOption struct { } func (o CompressorCallOption) before(c *callInfo) error { - c.compressorType = o.CompressorType + c.compressorName = o.CompressorType return nil } func (o CompressorCallOption) after(*callInfo, *csAttempt) {} @@ -692,9 +692,9 @@ func encode(c baseCodec, msg any) (mem.BufferSlice, error) { if err != nil { return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) } - if uint(b.Len()) > math.MaxUint32 { + if bufSize := uint(b.Len()); bufSize > math.MaxUint32 { b.Free() - return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", bufSize) } return b, nil } @@ -828,30 +828,13 @@ func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveM return nil, st.Err() } - var size int if pf.isCompressed() { defer compressed.Free() - // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. - if dc != nil { - var uncompressedBuf []byte - uncompressedBuf, err = dc.Do(compressed.Reader()) - if err == nil { - out = mem.BufferSlice{mem.SliceBuffer(uncompressedBuf)} - } - size = len(uncompressedBuf) - } else { - out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool) - } + out, err = decompress(compressor, compressed, dc, maxReceiveMessageSize, p.bufferPool) if err != nil { - return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) - } - if size > maxReceiveMessageSize { - out.Free() - // TODO: Revisit the error code. Currently keep it consistent with java - // implementation. - return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) + return nil, err } } else { out = compressed @@ -866,20 +849,46 @@ func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveM return out, nil } -// Using compressor, decompress d, returning data and size. -// Optionally, if data will be over maxReceiveMessageSize, just return the size. -func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) { - dcReader, err := compressor.Decompress(d.Reader()) - if err != nil { - return nil, 0, err +// decompress processes the given data by decompressing it using either a custom decompressor or a standard compressor. +// If a custom decompressor is provided, it takes precedence. The function validates that the decompressed data +// does not exceed the specified maximum size and returns an error if this limit is exceeded. +// On success, it returns the decompressed data. Otherwise, it returns an error if decompression fails or the data exceeds the size limit. 
+func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompressor, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, error) { + if dc != nil { + uncompressed, err := dc.Do(d.Reader()) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) + } + if len(uncompressed) > maxReceiveMessageSize { + return nil, status.Errorf(codes.ResourceExhausted, "grpc: message after decompression larger than max (%d vs. %d)", len(uncompressed), maxReceiveMessageSize) + } + return mem.BufferSlice{mem.SliceBuffer(uncompressed)}, nil } + if compressor != nil { + dcReader, err := compressor.Decompress(d.Reader()) + if err != nil { + return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the message: %v", err) + } - out, err := mem.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1), pool) - if err != nil { - out.Free() - return nil, 0, err + // Read at most one byte more than the limit from the decompressor. + // Unless the limit is MaxInt64, in which case, that's impossible, so + // apply no limit. + if limit := int64(maxReceiveMessageSize); limit < math.MaxInt64 { + dcReader = io.LimitReader(dcReader, limit+1) + } + out, err := mem.ReadAll(dcReader, pool) + if err != nil { + out.Free() + return nil, status.Errorf(codes.Internal, "grpc: failed to read decompressed data: %v", err) + } + + if out.Len() > maxReceiveMessageSize { + out.Free() + return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max %d", maxReceiveMessageSize) + } + return out, nil } - return out, out.Len(), nil + return nil, status.Errorf(codes.Internal, "grpc: no decompressor available for compressed payload") } type recvCompressor interface { diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/server.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/server.go index 16065a027..976e70ae0 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/server.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/server.go @@ -37,12 +37,14 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/encoding" "google.golang.org/grpc/encoding/proto" + estats "google.golang.org/grpc/experimental/stats" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/grpcutil" + istats "google.golang.org/grpc/internal/stats" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/mem" @@ -82,6 +84,9 @@ func init() { internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption internal.BufferPool = bufferPool + internal.MetricsRecorderForServer = func(srv *Server) estats.MetricsRecorder { + return istats.NewMetricsRecorderList(srv.opts.statsHandlers) + } } var statusOK = status.New(codes.OK, "") @@ -643,7 +648,7 @@ func (s *Server) serverWorker() { // connections to reduce the time spent overall on runtime.morestack. 
func (s *Server) initServerWorkers() { s.serverWorkerChannel = make(chan func()) - s.serverWorkerChannelClose = grpcsync.OnceFunc(func() { + s.serverWorkerChannelClose = sync.OnceFunc(func() { close(s.serverWorkerChannel) }) for i := uint32(0); i < s.opts.numServerWorkers; i++ { @@ -1360,8 +1365,16 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt } return err } - defer d.Free() + freed := false + dataFree := func() { + if !freed { + d.Free() + freed = true + } + } + defer dataFree() df := func(v any) error { + defer dataFree() if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } @@ -1637,10 +1650,10 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv // If dc is set and matches the stream's compression, use it. Otherwise, try // to find a matching registered compressor for decomp. if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { - ss.dc = s.opts.dc + ss.decompressorV0 = s.opts.dc } else if rc != "" && rc != encoding.Identity { - ss.decomp = encoding.GetCompressor(rc) - if ss.decomp == nil { + ss.decompressorV1 = encoding.GetCompressor(rc) + if ss.decompressorV1 == nil { st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) ss.s.WriteStatus(st) return st.Err() @@ -1652,12 +1665,12 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv // // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. if s.opts.cp != nil { - ss.cp = s.opts.cp + ss.compressorV0 = s.opts.cp ss.sendCompressorName = s.opts.cp.Type() } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { // Legacy compressor not specified; attempt to respond with same encoding. - ss.comp = encoding.GetCompressor(rc) - if ss.comp != nil { + ss.compressorV1 = encoding.GetCompressor(rc) + if ss.compressorV1 != nil { ss.sendCompressorName = rc } } @@ -1668,7 +1681,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv } } - ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.cp, ss.comp) + ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.compressorV0, ss.compressorV1) if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -1922,7 +1935,7 @@ func (s *Server) stop(graceful bool) { s.conns = nil if s.opts.numServerWorkers > 0 { - // Closing the channel (only once, via grpcsync.OnceFunc) after all the + // Closing the channel (only once, via sync.OnceFunc) after all the // connections have been closed above ensures that there are no // goroutines executing the callback passed to st.HandleStreams (where // the channel is written to). 
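The server.go hunk above swaps grpcsync.OnceFunc for the standard library's sync.OnceFunc (Go 1.21+) when closing the server worker channel. Below is a minimal sketch of the standard-library guarantee the change relies on — the closure runs at most once, so multiple shutdown paths cannot close the channel twice; the channel and names here are hypothetical, not grpc-go code.

	package main

	import (
		"fmt"
		"sync"
	)

	func main() {
		workerCh := make(chan func())

		// sync.OnceFunc returns a function that invokes the wrapped closure at most once.
		closeWorkerCh := sync.OnceFunc(func() {
			close(workerCh)
			fmt.Println("worker channel closed")
		})

		closeWorkerCh()
		closeWorkerCh() // no-op on the second call
	}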
diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/service_config.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/service_config.go index 7e83027d1..8d451e07c 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/service_config.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/service_config.go @@ -268,18 +268,21 @@ func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult { return &serviceconfig.ParseResult{Config: &sc} } +func isValidRetryPolicy(jrp *jsonRetryPolicy) bool { + return jrp.MaxAttempts > 1 && + jrp.InitialBackoff > 0 && + jrp.MaxBackoff > 0 && + jrp.BackoffMultiplier > 0 && + len(jrp.RetryableStatusCodes) > 0 +} + func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) { if jrp == nil { return nil, nil } - if jrp.MaxAttempts <= 1 || - jrp.InitialBackoff <= 0 || - jrp.MaxBackoff <= 0 || - jrp.BackoffMultiplier <= 0 || - len(jrp.RetryableStatusCodes) == 0 { - logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) - return nil, nil + if !isValidRetryPolicy(jrp) { + return nil, fmt.Errorf("invalid retry policy (%+v): ", jrp) } if jrp.MaxAttempts < maxAttempts { diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/stats/stats.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/stats/stats.go index 6f20d2d54..baf7740ef 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/stats/stats.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/stats/stats.go @@ -36,7 +36,12 @@ type RPCStats interface { IsClient() bool } -// Begin contains stats when an RPC attempt begins. +// Begin contains stats for the start of an RPC attempt. +// +// - Server-side: Triggered after `InHeader`, as headers are processed +// before the RPC lifecycle begins. +// - Client-side: The first stats event recorded. +// // FailFast is only valid if this Begin is from client side. type Begin struct { // Client is true if this Begin is from client side. @@ -69,7 +74,7 @@ func (*PickerUpdated) IsClient() bool { return true } func (*PickerUpdated) isRPCStats() {} -// InPayload contains the information for an incoming payload. +// InPayload contains stats about an incoming payload. type InPayload struct { // Client is true if this InPayload is from client side. Client bool @@ -98,7 +103,9 @@ func (s *InPayload) IsClient() bool { return s.Client } func (s *InPayload) isRPCStats() {} -// InHeader contains stats when a header is received. +// InHeader contains stats about header reception. +// +// - Server-side: The first stats event after the RPC request is received. type InHeader struct { // Client is true if this InHeader is from client side. Client bool @@ -123,7 +130,7 @@ func (s *InHeader) IsClient() bool { return s.Client } func (s *InHeader) isRPCStats() {} -// InTrailer contains stats when a trailer is received. +// InTrailer contains stats about trailer reception. type InTrailer struct { // Client is true if this InTrailer is from client side. Client bool @@ -139,7 +146,7 @@ func (s *InTrailer) IsClient() bool { return s.Client } func (s *InTrailer) isRPCStats() {} -// OutPayload contains the information for an outgoing payload. +// OutPayload contains stats about an outgoing payload. type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool @@ -166,7 +173,10 @@ func (s *OutPayload) IsClient() bool { return s.Client } func (s *OutPayload) isRPCStats() {} -// OutHeader contains stats when a header is sent. 
+// OutHeader contains stats about header transmission. +// +// - Client-side: Only occurs after 'Begin', as headers are always the first +// thing sent on a stream. type OutHeader struct { // Client is true if this OutHeader is from client side. Client bool @@ -189,14 +199,15 @@ func (s *OutHeader) IsClient() bool { return s.Client } func (s *OutHeader) isRPCStats() {} -// OutTrailer contains stats when a trailer is sent. +// OutTrailer contains stats about trailer transmission. type OutTrailer struct { // Client is true if this OutTrailer is from client side. Client bool // WireLength is the wire length of trailer. // - // Deprecated: This field is never set. The length is not known when this message is - // emitted because the trailer fields are compressed with hpack after that. + // Deprecated: This field is never set. The length is not known when this + // message is emitted because the trailer fields are compressed with hpack + // after that. WireLength int // Trailer contains the trailer metadata sent to the client. This // field is only valid if this OutTrailer is from the server side. @@ -208,7 +219,7 @@ func (s *OutTrailer) IsClient() bool { return s.Client } func (s *OutTrailer) isRPCStats() {} -// End contains stats when an RPC ends. +// End contains stats about RPC completion. type End struct { // Client is true if this End is from client side. Client bool @@ -238,7 +249,7 @@ type ConnStats interface { IsClient() bool } -// ConnBegin contains the stats of a connection when it is established. +// ConnBegin contains stats about connection establishment. type ConnBegin struct { // Client is true if this ConnBegin is from client side. Client bool @@ -249,7 +260,7 @@ func (s *ConnBegin) IsClient() bool { return s.Client } func (s *ConnBegin) isConnStats() {} -// ConnEnd contains the stats of a connection when it ends. +// ConnEnd contains stats about connection termination. type ConnEnd struct { // Client is true if this ConnEnd is from client side. 
Client bool diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/stream.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/stream.go index 17e2267b3..12163150b 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/stream.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/stream.go @@ -258,9 +258,9 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), opts ...CallOption) (_ iresolver.ClientStream, err error) { - c := defaultCallInfo() + callInfo := defaultCallInfo() if mc.WaitForReady != nil { - c.failFast = !*mc.WaitForReady + callInfo.failFast = !*mc.WaitForReady } // Possible context leak: @@ -281,20 +281,20 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client }() for _, o := range opts { - if err := o.before(c); err != nil { + if err := o.before(callInfo); err != nil { return nil, toRPCErr(err) } } - c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) - c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) - if err := setCallInfoCodec(c); err != nil { + callInfo.maxSendMessageSize = getMaxSize(mc.MaxReqSize, callInfo.maxSendMessageSize, defaultClientMaxSendMessageSize) + callInfo.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, callInfo.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(callInfo); err != nil { return nil, err } callHdr := &transport.CallHdr{ Host: cc.authority, Method: method, - ContentSubtype: c.contentSubtype, + ContentSubtype: callInfo.contentSubtype, DoneFunc: doneFunc, } @@ -302,22 +302,22 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client // set. In that case, also find the compressor from the encoding package. // Otherwise, use the compressor configured by the WithCompressor DialOption, // if set. 
- var cp Compressor - var comp encoding.Compressor - if ct := c.compressorType; ct != "" { + var compressorV0 Compressor + var compressorV1 encoding.Compressor + if ct := callInfo.compressorName; ct != "" { callHdr.SendCompress = ct if ct != encoding.Identity { - comp = encoding.GetCompressor(ct) - if comp == nil { + compressorV1 = encoding.GetCompressor(ct) + if compressorV1 == nil { return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) } } - } else if cc.dopts.cp != nil { - callHdr.SendCompress = cc.dopts.cp.Type() - cp = cc.dopts.cp + } else if cc.dopts.compressorV0 != nil { + callHdr.SendCompress = cc.dopts.compressorV0.Type() + compressorV0 = cc.dopts.compressorV0 } - if c.creds != nil { - callHdr.Creds = c.creds + if callInfo.creds != nil { + callHdr.Creds = callInfo.creds } cs := &clientStream{ @@ -325,12 +325,12 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client ctx: ctx, methodConfig: &mc, opts: opts, - callInfo: c, + callInfo: callInfo, cc: cc, desc: desc, - codec: c.codec, - cp: cp, - comp: comp, + codec: callInfo.codec, + compressorV0: compressorV0, + compressorV1: compressorV1, cancel: cancel, firstAttempt: true, onCommit: onCommit, @@ -412,7 +412,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) return nil, ErrClientConnClosing } - ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.cp, cs.comp) + ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1) method := cs.callHdr.Method var beginTime time.Time shs := cs.cc.dopts.copts.StatsHandlers @@ -454,12 +454,12 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) } return &csAttempt{ - ctx: ctx, - beginTime: beginTime, - cs: cs, - dc: cs.cc.dopts.dc, - statsHandlers: shs, - trInfo: trInfo, + ctx: ctx, + beginTime: beginTime, + cs: cs, + decompressorV0: cs.cc.dopts.dc, + statsHandlers: shs, + trInfo: trInfo, }, nil } @@ -467,7 +467,7 @@ func (a *csAttempt) getTransport() error { cs := a.cs var err error - a.t, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) + a.transport, a.pickResult, err = cs.cc.getTransport(a.ctx, cs.callInfo.failFast, cs.callHdr.Method) if err != nil { if de, ok := err.(dropError); ok { err = de.error @@ -476,7 +476,7 @@ func (a *csAttempt) getTransport() error { return err } if a.trInfo != nil { - a.trInfo.firstLine.SetRemoteAddr(a.t.RemoteAddr()) + a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr()) } return nil } @@ -503,7 +503,7 @@ func (a *csAttempt) newStream() error { a.ctx = metadata.NewOutgoingContext(a.ctx, md) } - s, err := a.t.NewStream(a.ctx, cs.callHdr) + s, err := a.transport.NewStream(a.ctx, cs.callHdr) if err != nil { nse, ok := err.(*transport.NewStreamError) if !ok { @@ -518,9 +518,9 @@ func (a *csAttempt) newStream() error { // Unwrap and convert error. 
return toRPCErr(nse.Err) } - a.s = s + a.transportStream = s a.ctx = s.Context() - a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} + a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool} return nil } @@ -532,9 +532,9 @@ type clientStream struct { cc *ClientConn desc *StreamDesc - codec baseCodec - cp Compressor - comp encoding.Compressor + codec baseCodec + compressorV0 Compressor + compressorV1 encoding.Compressor cancel context.CancelFunc // cancels all attempts @@ -583,17 +583,17 @@ type replayOp struct { // csAttempt implements a single transport stream attempt within a // clientStream. type csAttempt struct { - ctx context.Context - cs *clientStream - t transport.ClientTransport - s *transport.ClientStream - p *parser - pickResult balancer.PickResult + ctx context.Context + cs *clientStream + transport transport.ClientTransport + transportStream *transport.ClientStream + parser *parser + pickResult balancer.PickResult - finished bool - dc Decompressor - decomp encoding.Compressor - decompSet bool + finished bool + decompressorV0 Decompressor + decompressorV1 encoding.Compressor + decompressorSet bool mu sync.Mutex // guards trInfo.tr // trInfo may be nil (if EnableTracing is false). @@ -639,14 +639,14 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { // RPC is finished or committed or was dropped by the picker; cannot retry. return false, err } - if a.s == nil && a.allowTransparentRetry { + if a.transportStream == nil && a.allowTransparentRetry { return true, nil } // Wait for the trailers. unprocessed := false - if a.s != nil { - <-a.s.Done() - unprocessed = a.s.Unprocessed() + if a.transportStream != nil { + <-a.transportStream.Done() + unprocessed = a.transportStream.Unprocessed() } if cs.firstAttempt && unprocessed { // First attempt, stream unprocessed: transparently retry. @@ -658,14 +658,14 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { pushback := 0 hasPushback := false - if a.s != nil { - if !a.s.TrailersOnly() { + if a.transportStream != nil { + if !a.transportStream.TrailersOnly() { return false, err } // TODO(retry): Move down if the spec changes to not check server pushback // before considering this a failure for throttling. - sps := a.s.Trailer()["grpc-retry-pushback-ms"] + sps := a.transportStream.Trailer()["grpc-retry-pushback-ms"] if len(sps) == 1 { var e error if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 { @@ -682,8 +682,8 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) { } var code codes.Code - if a.s != nil { - code = a.s.Status().Code() + if a.transportStream != nil { + code = a.transportStream.Status().Code() } else { code = status.Code(err) } @@ -756,8 +756,8 @@ func (cs *clientStream) Context() context.Context { cs.commitAttempt() // No need to lock before using attempt, since we know it is committed and // cannot change. 
- if cs.attempt.s != nil { - return cs.attempt.s.Context() + if cs.attempt.transportStream != nil { + return cs.attempt.transportStream.Context() } return cs.ctx } @@ -794,9 +794,9 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) continue } if err == io.EOF { - <-a.s.Done() + <-a.transportStream.Done() } - if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) { + if err == nil || (err == io.EOF && a.transportStream.Status().Code() == codes.OK) { onSuccess() cs.mu.Unlock() return err @@ -812,7 +812,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD err := cs.withRetry(func(a *csAttempt) error { var err error - m, err = a.s.Header() + m, err = a.transportStream.Header() return toRPCErr(err) }, cs.commitAttemptLocked) @@ -856,10 +856,10 @@ func (cs *clientStream) Trailer() metadata.MD { // directions -- it will prevent races and should not meaningfully impact // performance. cs.commitAttempt() - if cs.attempt.s == nil { + if cs.attempt.transportStream == nil { return nil } - return cs.attempt.s.Trailer() + return cs.attempt.transportStream.Trailer() } func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error { @@ -904,7 +904,7 @@ func (cs *clientStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool) + hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.compressorV0, cs.compressorV1, cs.cc.dopts.copts.BufferPool) if err != nil { return err } @@ -992,7 +992,7 @@ func (cs *clientStream) CloseSend() error { } cs.sentLast = true op := func(a *csAttempt) error { - a.s.Write(nil, nil, &transport.WriteOptions{Last: true}) + a.transportStream.Write(nil, nil, &transport.WriteOptions{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1030,7 +1030,7 @@ func (cs *clientStream) finish(err error) { if cs.attempt != nil { cs.attempt.finish(err) // after functions all rely upon having a stream. - if cs.attempt.s != nil { + if cs.attempt.transportStream != nil { for _, o := range cs.opts { o.after(cs.callInfo, cs.attempt) } @@ -1084,7 +1084,7 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength } a.mu.Unlock() } - if err := a.s.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil { + if err := a.transportStream.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil { if !cs.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1108,25 +1108,25 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { defer payInfo.free() } - if !a.decompSet { + if !a.decompressorSet { // Block until we receive headers containing received message encoding. - if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { - if a.dc == nil || a.dc.Type() != ct { + if ct := a.transportStream.RecvCompress(); ct != "" && ct != encoding.Identity { + if a.decompressorV0 == nil || a.decompressorV0.Type() != ct { // No configured decompressor, or it does not match the incoming // message encoding; attempt to find a registered compressor that does. 
- a.dc = nil - a.decomp = encoding.GetCompressor(ct) + a.decompressorV0 = nil + a.decompressorV1 = encoding.GetCompressor(ct) } } else { // No compression is used; disable our decompressor. - a.dc = nil + a.decompressorV0 = nil } // Only initialize this state once per stream. - a.decompSet = true + a.decompressorSet = true } - if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil { + if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil { if err == io.EOF { - if statusErr := a.s.Status().Err(); statusErr != nil { + if statusErr := a.transportStream.Status().Err(); statusErr != nil { return statusErr } return io.EOF // indicates successful end of stream. @@ -1157,8 +1157,8 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { } // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF { - return a.s.Status().Err() // non-server streaming Recv returns nil on success + if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF { + return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success } else if err != nil { return toRPCErr(err) } @@ -1177,20 +1177,20 @@ func (a *csAttempt) finish(err error) { err = nil } var tr metadata.MD - if a.s != nil { - a.s.Close(err) - tr = a.s.Trailer() + if a.transportStream != nil { + a.transportStream.Close(err) + tr = a.transportStream.Trailer() } if a.pickResult.Done != nil { br := false - if a.s != nil { - br = a.s.BytesReceived() + if a.transportStream != nil { + br = a.transportStream.BytesReceived() } a.pickResult.Done(balancer.DoneInfo{ Err: err, Trailer: tr, - BytesSent: a.s != nil, + BytesSent: a.transportStream != nil, BytesReceived: br, ServerLoad: balancerload.Parse(tr), }) @@ -1272,7 +1272,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin // if set. var cp Compressor var comp encoding.Compressor - if ct := c.compressorType; ct != "" { + if ct := c.compressorName; ct != "" { callHdr.SendCompress = ct if ct != encoding.Identity { comp = encoding.GetCompressor(ct) @@ -1280,9 +1280,9 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) } } - } else if ac.cc.dopts.cp != nil { - callHdr.SendCompress = ac.cc.dopts.cp.Type() - cp = ac.cc.dopts.cp + } else if ac.cc.dopts.compressorV0 != nil { + callHdr.SendCompress = ac.cc.dopts.compressorV0.Type() + cp = ac.cc.dopts.compressorV0 } if c.creds != nil { callHdr.Creds = c.creds @@ -1290,26 +1290,26 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin // Use a special addrConnStream to avoid retry. 
as := &addrConnStream{ - callHdr: callHdr, - ac: ac, - ctx: ctx, - cancel: cancel, - opts: opts, - callInfo: c, - desc: desc, - codec: c.codec, - cp: cp, - comp: comp, - t: t, + callHdr: callHdr, + ac: ac, + ctx: ctx, + cancel: cancel, + opts: opts, + callInfo: c, + desc: desc, + codec: c.codec, + sendCompressorV0: cp, + sendCompressorV1: comp, + transport: t, } - s, err := as.t.NewStream(as.ctx, as.callHdr) + s, err := as.transport.NewStream(as.ctx, as.callHdr) if err != nil { err = toRPCErr(err) return nil, err } - as.s = s - as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} + as.transportStream = s + as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool} ac.incrCallsStarted() if desc != unaryStreamDesc { // Listen on stream context to cleanup when the stream context is @@ -1335,29 +1335,31 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin } type addrConnStream struct { - s *transport.ClientStream - ac *addrConn - callHdr *transport.CallHdr - cancel context.CancelFunc - opts []CallOption - callInfo *callInfo - t transport.ClientTransport - ctx context.Context - sentLast bool - desc *StreamDesc - codec baseCodec - cp Compressor - comp encoding.Compressor - decompSet bool - dc Decompressor - decomp encoding.Compressor - p *parser - mu sync.Mutex - finished bool + transportStream *transport.ClientStream + ac *addrConn + callHdr *transport.CallHdr + cancel context.CancelFunc + opts []CallOption + callInfo *callInfo + transport transport.ClientTransport + ctx context.Context + sentLast bool + desc *StreamDesc + codec baseCodec + sendCompressorV0 Compressor + sendCompressorV1 encoding.Compressor + decompressorSet bool + decompressorV0 Decompressor + decompressorV1 encoding.Compressor + parser *parser + + // mu guards finished and is held for the entire finish method. + mu sync.Mutex + finished bool } func (as *addrConnStream) Header() (metadata.MD, error) { - m, err := as.s.Header() + m, err := as.transportStream.Header() if err != nil { as.finish(toRPCErr(err)) } @@ -1365,7 +1367,7 @@ func (as *addrConnStream) Header() (metadata.MD, error) { } func (as *addrConnStream) Trailer() metadata.MD { - return as.s.Trailer() + return as.transportStream.Trailer() } func (as *addrConnStream) CloseSend() error { @@ -1375,7 +1377,7 @@ func (as *addrConnStream) CloseSend() error { } as.sentLast = true - as.s.Write(nil, nil, &transport.WriteOptions{Last: true}) + as.transportStream.Write(nil, nil, &transport.WriteOptions{Last: true}) // Always return nil; io.EOF is the only error that might make sense // instead, but there is no need to signal the client to call RecvMsg // as the only use left for the stream after CloseSend is to call @@ -1384,7 +1386,7 @@ func (as *addrConnStream) CloseSend() error { } func (as *addrConnStream) Context() context.Context { - return as.s.Context() + return as.transportStream.Context() } func (as *addrConnStream) SendMsg(m any) (err error) { @@ -1406,7 +1408,7 @@ func (as *addrConnStream) SendMsg(m any) (err error) { } // load hdr, payload, data - hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool) + hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.sendCompressorV0, as.sendCompressorV1, as.ac.dopts.copts.BufferPool) if err != nil { return err } @@ -1425,7 +1427,7 @@ func (as *addrConnStream) SendMsg(m any) (err error) { return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. 
%d)", payload.Len(), *as.callInfo.maxSendMessageSize) } - if err := as.s.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil { + if err := as.transportStream.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil { if !as.desc.ClientStreams { // For non-client-streaming RPCs, we return nil instead of EOF on error // because the generated code requires it. finish is not called; RecvMsg() @@ -1446,25 +1448,25 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { } }() - if !as.decompSet { + if !as.decompressorSet { // Block until we receive headers containing received message encoding. - if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity { - if as.dc == nil || as.dc.Type() != ct { + if ct := as.transportStream.RecvCompress(); ct != "" && ct != encoding.Identity { + if as.decompressorV0 == nil || as.decompressorV0.Type() != ct { // No configured decompressor, or it does not match the incoming // message encoding; attempt to find a registered compressor that does. - as.dc = nil - as.decomp = encoding.GetCompressor(ct) + as.decompressorV0 = nil + as.decompressorV1 = encoding.GetCompressor(ct) } } else { // No compression is used; disable our decompressor. - as.dc = nil + as.decompressorV0 = nil } // Only initialize this state once per stream. - as.decompSet = true + as.decompressorSet = true } - if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil { + if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil { if err == io.EOF { - if statusErr := as.s.Status().Err(); statusErr != nil { + if statusErr := as.transportStream.Status().Err(); statusErr != nil { return statusErr } return io.EOF // indicates successful end of stream. @@ -1479,8 +1481,8 @@ func (as *addrConnStream) RecvMsg(m any) (err error) { // Special handling for non-server-stream rpcs. // This recv expects EOF or errors, so we don't collect inPayload. - if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF { - return as.s.Status().Err() // non-server streaming Recv returns nil on success + if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF { + return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success } else if err != nil { return toRPCErr(err) } @@ -1498,8 +1500,8 @@ func (as *addrConnStream) finish(err error) { // Ending a stream with EOF indicates a success. err = nil } - if as.s != nil { - as.s.Close(err) + if as.transportStream != nil { + as.transportStream.Close(err) } if err != nil { @@ -1570,10 +1572,10 @@ type serverStream struct { p *parser codec baseCodec - cp Compressor - dc Decompressor - comp encoding.Compressor - decomp encoding.Compressor + compressorV0 Compressor + compressorV1 encoding.Compressor + decompressorV0 Decompressor + decompressorV1 encoding.Compressor sendCompressorName string @@ -1669,12 +1671,12 @@ func (ss *serverStream) SendMsg(m any) (err error) { // Server handler could have set new compressor by calling SetSendCompressor. // In case it is set, we need to use it for compressing outbound message. 
if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName { - ss.comp = encoding.GetCompressor(sendCompressorsName) + ss.compressorV1 = encoding.GetCompressor(sendCompressorsName) ss.sendCompressorName = sendCompressorsName } // load hdr, payload, data - hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool) + hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.compressorV0, ss.compressorV1, ss.p.bufferPool) if err != nil { return err } @@ -1755,7 +1757,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { payInfo = &payloadInfo{} defer payInfo.free() } - if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil { + if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil { if err == io.EOF { if len(ss.binlogs) != 0 { chc := &binarylog.ClientHalfClose{} @@ -1766,7 +1768,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) { return err } if err == io.ErrUnexpectedEOF { - err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + err = status.Error(codes.Internal, io.ErrUnexpectedEOF.Error()) } return toRPCErr(err) } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/grpc/version.go b/src/cmd/linuxkit/vendor/google.golang.org/grpc/version.go index d2bba7f3d..2bae4db89 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/grpc/version.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.69.4" +const Version = "1.72.2" diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/encoding/protojson/decode.go index cffdfda96..737d6876d 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/encoding/protojson/decode.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/encoding/protojson/decode.go @@ -192,11 +192,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) erro fd = fieldDescs.ByTextName(name) } } - if flags.ProtoLegacyWeak { - if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { - fd = nil // reset since the weak reference is not linked in - } - } if fd == nil { // Field is unknown. diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index d972a3d98..b53805056 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -185,11 +185,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) erro } else if xtErr != nil && xtErr != protoregistry.NotFound { return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr) } - if flags.ProtoLegacyWeak { - if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { - fd = nil // reset since the weak reference is not linked in - } - } // Handle unknown fields. 
if fd == nil { diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb index 5a57ef6f3..323829da1 100644 Binary files a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb and b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb differ diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 7e87c7604..669133d04 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -26,7 +26,7 @@ var byteType = reflect.TypeOf(byte(0)) // The type is the underlying field type (e.g., a repeated field may be // represented by []T, but the Go type passed in is just T). // A list of enum value descriptors must be provided for enum fields. -// This does not populate the Enum or Message (except for weak message). +// This does not populate the Enum or Message. // // This function is a best effort attempt; parsing errors are ignored. func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { @@ -109,9 +109,6 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri } case s == "packed": f.L1.EditionFeatures.IsPacked = true - case strings.HasPrefix(s, "weak="): - f.L1.IsWeak = true - f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) case strings.HasPrefix(s, "def="): // The default tag is special in that everything afterwards is the // default regardless of the presence of commas. @@ -183,9 +180,6 @@ func Marshal(fd protoreflect.FieldDescriptor, enumName string) string { // the exact same semantics from the previous generator. tag = append(tag, "json="+jsonName) } - if fd.IsWeak() { - tag = append(tag, "weak="+string(fd.Message().FullName())) - } // The previous implementation does not tag extension fields as proto3, // even when the field is defined in a proto3 file. Match that behavior // for consistency. 
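
Note: the addrConnStream changes earlier in this diff rename the single-letter fields (cp, comp, t, s, p) to sendCompressorV0, sendCompressorV1, transport, transportStream and parser, making explicit that grpc-go carries two compression APIs side by side: the legacy grpc.Compressor/Decompressor pair (V0) and registered encoding.Compressor implementations (V1). A minimal client-side sketch of the registered path follows; the address and method path are placeholders and emptypb.Empty is only a stand-in message, assuming grpc-go v1.72.x as vendored here.

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/encoding/gzip" // registers the "gzip" encoding.Compressor
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// grpc.NewClient replaces the deprecated grpc.Dial in current grpc-go.
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("new client: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// UseCompressor selects a registered encoding.Compressor by name for this
	// call; this is the path the renamed sendCompressorV1 field represents.
	// The legacy per-connection grpc.WithCompressor dial option corresponds to
	// sendCompressorV0.
	req, resp := &emptypb.Empty{}, &emptypb.Empty{}
	if err := conn.Invoke(ctx, "/example.Echo/Ping", req, resp,
		grpc.UseCompressor(gzip.Name)); err != nil {
		log.Printf("invoke failed (no server is running in this sketch): %v", err)
	}
}
```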
diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 378b826fa..688aabe43 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -19,7 +19,6 @@ import ( "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) // Edition is an Enum for proto2.Edition @@ -275,7 +274,6 @@ type ( Kind protoreflect.Kind StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto - IsWeak bool // promoted from google.protobuf.FieldOptions IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields @@ -369,7 +367,7 @@ func (fd *Field) IsPacked() bool { return fd.L1.EditionFeatures.IsPacked } func (fd *Field) IsExtension() bool { return false } -func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsWeak() bool { return false } func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } @@ -396,11 +394,6 @@ func (fd *Field) Enum() protoreflect.EnumDescriptor { return fd.L1.Enum } func (fd *Field) Message() protoreflect.MessageDescriptor { - if fd.L1.IsWeak { - if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { - return d.(protoreflect.MessageDescriptor) - } - } return fd.L1.Message } func (fd *Field) IsMapEntry() bool { diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 67a51b327..d4c94458b 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -32,11 +32,6 @@ func (file *File) resolveMessages() { for j := range md.L2.Fields.List { fd := &md.L2.Fields.List[j] - // Weak fields are resolved upon actual use. - if fd.L1.IsWeak { - continue - } - // Resolve message field dependency. 
switch fd.L1.Kind { case protoreflect.EnumKind: @@ -150,8 +145,6 @@ func (fd *File) unmarshalFull(b []byte) { switch num { case genid.FileDescriptorProto_PublicDependency_field_number: fd.L2.Imports[v].IsPublic = true - case genid.FileDescriptorProto_WeakDependency_field_number: - fd.L2.Imports[v].IsWeak = true } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -502,8 +495,6 @@ func (fd *Field) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) - case genid.FieldOptions_Weak_field_number: - fd.L1.IsWeak = protowire.DecodeBool(v) case genid.FieldOptions_Lazy_field_number: fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 10132c9b3..b08b71830 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -69,6 +69,9 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value case genid.FeatureSet_JsonFormat_field_number: parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value + case genid.FeatureSet_EnforceNamingStyle_field_number: + // EnforceNamingStyle is enforced in protoc, languages other than C++ + // are not supposed to do anything with this feature. default: panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num)) } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/filetype/build.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/filetype/build.go index ba83fea44..e1b4130bd 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -63,7 +63,7 @@ type Builder struct { // message declarations in "flattened ordering". // // Dependencies are Go types for enums or messages referenced by - // message fields (excluding weak fields), for parent extended messages of + // message fields, for parent extended messages of // extension fields, for enums or messages referenced by extension fields, // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/flags/flags.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/flags/flags.go index 5cb3ee70f..a06ccabc2 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/flags/flags.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/flags/flags.go @@ -6,7 +6,7 @@ package flags // ProtoLegacy specifies whether to enable support for legacy functionality -// such as MessageSets, weak fields, and various other obscure behavior +// such as MessageSets, and various other obscure behavior // that is necessary to maintain backwards compatibility with proto1 or // the pre-release variants of proto2 and proto3. // @@ -22,8 +22,3 @@ const ProtoLegacy = protoLegacy // extension fields at unmarshal time, but defers creating the message // structure until the extension is first accessed. 
const LazyUnmarshalExtensions = ProtoLegacy - -// ProtoLegacyWeak specifies whether to enable support for weak fields. -// This flag was split out of ProtoLegacy in preparation for removing -// support for weak fields (independent of the other protolegacy features). -const ProtoLegacyWeak = ProtoLegacy diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go index f30ab6b58..39524782a 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go @@ -1014,6 +1014,7 @@ const ( FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation" FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding" FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format" + FeatureSet_EnforceNamingStyle_field_name protoreflect.Name = "enforce_naming_style" FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence" FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type" @@ -1021,6 +1022,7 @@ const ( FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation" FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding" FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format" + FeatureSet_EnforceNamingStyle_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style" ) // Field numbers for google.protobuf.FeatureSet. @@ -1031,6 +1033,7 @@ const ( FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4 FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5 FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6 + FeatureSet_EnforceNamingStyle_field_number protoreflect.FieldNumber = 7 ) // Full and short names for google.protobuf.FeatureSet.FieldPresence. @@ -1112,6 +1115,19 @@ const ( FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2 ) +// Full and short names for google.protobuf.FeatureSet.EnforceNamingStyle. +const ( + FeatureSet_EnforceNamingStyle_enum_fullname = "google.protobuf.FeatureSet.EnforceNamingStyle" + FeatureSet_EnforceNamingStyle_enum_name = "EnforceNamingStyle" +) + +// Enum values for google.protobuf.FeatureSet.EnforceNamingStyle. +const ( + FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN_enum_value = 0 + FeatureSet_STYLE2024_enum_value = 1 + FeatureSet_STYLE_LEGACY_enum_value = 2 +) + // Names for google.protobuf.FeatureSetDefaults. 
const ( FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults" diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/genid/goname.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/genid/goname.go index 693d2e9e1..99bb95baf 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/genid/goname.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/genid/goname.go @@ -11,15 +11,10 @@ const ( SizeCache_goname = "sizeCache" SizeCacheA_goname = "XXX_sizecache" - WeakFields_goname = "weakFields" - WeakFieldsA_goname = "XXX_weak" - UnknownFields_goname = "unknownFields" UnknownFieldsA_goname = "XXX_unrecognized" ExtensionFields_goname = "extensionFields" ExtensionFieldsA_goname = "XXX_InternalExtensions" ExtensionFieldsB_goname = "XXX_extensions" - - WeakFieldPrefix_goname = "XXX_weak_" ) diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 7c1f66c8c..d14d7d93c 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -5,15 +5,12 @@ package impl import ( - "fmt" "reflect" - "sync" "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoiface" ) @@ -121,78 +118,6 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si } } -func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs { - var once sync.Once - var messageType protoreflect.MessageType - lazyInit := func() { - once.Do(func() { - messageName := fd.Message().FullName() - messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) - }) - } - - return pointerCoderFuncs{ - size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { - m, ok := p.WeakFields().get(f.num) - if !ok { - return 0 - } - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - return sizeMessage(m, f.tagsize, opts) - }, - marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - m, ok := p.WeakFields().get(f.num) - if !ok { - return b, nil - } - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - return appendMessage(b, m, f.wiretag, opts) - }, - unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { - fs := p.WeakFields() - m, ok := fs.get(f.num) - if !ok { - lazyInit() - if messageType == nil { - return unmarshalOutput{}, errUnknown - } - m = messageType.New().Interface() - fs.set(f.num, m) - } - return consumeMessage(b, m, wtyp, opts) - }, - isInit: func(p pointer, f *coderFieldInfo) error { - m, ok := p.WeakFields().get(f.num) - if !ok { - return nil - } - return proto.CheckInitialized(m) - }, - merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { - sm, ok := src.WeakFields().get(f.num) - if !ok { - return - } - dm, ok := dst.WeakFields().get(f.num) - if !ok { - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - dm = 
messageType.New().Interface() - dst.WeakFields().set(f.num, dm) - } - opts.Merge(dm, sm) - }, - } -} - func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 111d95833..f78b57b04 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -119,9 +119,6 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { } case isOneof: fieldOffset = offsetOf(fs) - case fd.IsWeak(): - fieldOffset = si.weakOffset - funcs = makeWeakMessageFieldCoder(fd) default: fieldOffset = offsetOf(fs) childMessage, funcs = fieldCoder(fd, ft) diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go index f81d7d0db..41c1f74ef 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go @@ -46,9 +46,6 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): fieldOffset = offsetOf(fs) - case fd.IsWeak(): - fieldOffset = si.weakOffset - funcs = makeWeakMessageFieldCoder(fd) case fd.Message() != nil && !fd.IsMap(): fieldOffset = offsetOf(fs) if fd.IsList() { diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/lazy.go index e8fb6c35b..c7de31e24 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/lazy.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/lazy.go @@ -131,7 +131,7 @@ func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Typ fmi := f.validation.mi if fmi == nil { fd := mi.Desc.Fields().ByNumber(f.num) - if fd == nil || !fd.IsWeak() { + if fd == nil { return out, ValidationUnknown } messageName := fd.Message().FullName() diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index bf0b6049b..a51dffbe2 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -310,12 +310,9 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Parent = md fd.L0.Index = n - if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked { + if fd.L1.EditionFeatures.IsPacked { fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() - if fd.L1.IsWeak { - opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) - } if fd.L1.EditionFeatures.IsPacked { opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked)) } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/message.go 
b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/message.go index d1f79b422..d50423dcb 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -14,7 +14,6 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -120,7 +119,6 @@ type ( var ( sizecacheType = reflect.TypeOf(SizeCache(0)) - weakFieldsType = reflect.TypeOf(WeakFields(nil)) unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil)) unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil)) extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) @@ -129,8 +127,6 @@ var ( type structInfo struct { sizecacheOffset offset sizecacheType reflect.Type - weakOffset offset - weakType reflect.Type unknownOffset offset unknownType reflect.Type extensionOffset offset @@ -148,7 +144,6 @@ type structInfo struct { func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { si := structInfo{ sizecacheOffset: invalidOffset, - weakOffset: invalidOffset, unknownOffset: invalidOffset, extensionOffset: invalidOffset, lazyOffset: invalidOffset, @@ -168,11 +163,6 @@ fieldLoop: si.sizecacheOffset = offsetOf(f) si.sizecacheType = f.Type } - case genid.WeakFields_goname, genid.WeakFieldsA_goname: - if f.Type == weakFieldsType { - si.weakOffset = offsetOf(f) - si.weakType = f.Type - } case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { si.unknownOffset = offsetOf(f) @@ -256,9 +246,6 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { mi.init() fd := mi.Desc.Fields().Get(i) switch { - case fd.IsWeak(): - mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()) - return mt case fd.IsMap(): return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} default: diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go index d8dcd7886..dd55e8e00 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -56,9 +56,6 @@ func opaqueInitHook(mi *MessageInfo) bool { usePresence, _ := usePresenceForField(si, fd) switch { - case fd.IsWeak(): - // Weak fields are no different for opaque. - fi = fieldInfoForWeakMessage(fd, si.weakOffset) case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): // Oneofs are no different for opaque. 
fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) @@ -620,8 +617,6 @@ func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) ( switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): return false, false - case fd.IsWeak(): - return false, false case fd.IsMap(): return false, false case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index 31c19b54f..0d20132fa 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -72,8 +72,6 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { fi = fieldInfoForMap(fd, fs, mi.Exporter) case fd.IsList(): fi = fieldInfoForList(fd, fs, mi.Exporter) - case fd.IsWeak(): - fi = fieldInfoForWeakMessage(fd, si.weakOffset) case fd.Message() != nil: fi = fieldInfoForMessage(fd, fs, mi.Exporter) default: @@ -219,9 +217,6 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } case fd.Message() != nil: ft = fs.Type - if fd.IsWeak() { - ft = nil - } isMessage = true } if isMessage && ft != nil && ft.Kind() != reflect.Ptr { diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 3cd1fbc21..68d4ae32e 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -8,11 +8,8 @@ import ( "fmt" "math" "reflect" - "sync" - "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) type fieldInfo struct { @@ -332,79 +329,6 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } } -func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo { - if !flags.ProtoLegacyWeak { - panic("no support for proto1 weak fields") - } - - var once sync.Once - var messageType protoreflect.MessageType - lazyInit := func() { - once.Do(func() { - messageName := fd.Message().FullName() - messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) - if messageType == nil { - panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) - } - }) - } - - num := fd.Number() - return fieldInfo{ - fieldDesc: fd, - has: func(p pointer) bool { - if p.IsNil() { - return false - } - _, ok := p.Apply(weakOffset).WeakFields().get(num) - return ok - }, - clear: func(p pointer) { - p.Apply(weakOffset).WeakFields().clear(num) - }, - get: func(p pointer) protoreflect.Value { - lazyInit() - if p.IsNil() { - return protoreflect.ValueOfMessage(messageType.Zero()) - } - m, ok := p.Apply(weakOffset).WeakFields().get(num) - if !ok { - return protoreflect.ValueOfMessage(messageType.Zero()) - } - return protoreflect.ValueOfMessage(m.ProtoReflect()) - }, - set: func(p pointer, v protoreflect.Value) { - lazyInit() - m := v.Message() - if m.Descriptor() != messageType.Descriptor() { - if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != 
want { - panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want)) - } - panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName())) - } - p.Apply(weakOffset).WeakFields().set(num, m.Interface()) - }, - mutable: func(p pointer) protoreflect.Value { - lazyInit() - fs := p.Apply(weakOffset).WeakFields() - m, ok := fs.get(num) - if !ok { - m = messageType.New().Interface() - fs.set(num, m) - } - return protoreflect.ValueOfMessage(m.ProtoReflect()) - }, - newMessage: func() protoreflect.Message { - lazyInit() - return messageType.New() - }, - newField: func() protoreflect.Value { - lazyInit() - return protoreflect.ValueOfMessage(messageType.New()) - }, - } -} - func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type conv := NewConverter(ft, fd) diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 6bed45e35..62f8bf663 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -111,7 +111,6 @@ func (p pointer) StringSlice() *[]string { return (*[]string)(p.p func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) } func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo { return (**protolazy.XXX_lazyUnmarshalInfo)(p.p) diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/validate.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/validate.go index b534a3d6d..7b2995dde 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -211,9 +211,7 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat switch fd.Kind() { case protoreflect.MessageKind: vi.typ = validationTypeMessage - if !fd.IsWeak() { - vi.mi = getMessageInfo(ft) - } + vi.mi = getMessageInfo(ft) case protoreflect.GroupKind: vi.typ = validationTypeGroup vi.mi = getMessageInfo(ft) @@ -320,26 +318,6 @@ State: } if f != nil { vi = f.validation - if vi.typ == validationTypeMessage && vi.mi == nil { - // Probable weak field. - // - // TODO: Consider storing the results of this lookup somewhere - // rather than recomputing it on every validation. - fd := st.mi.Desc.Fields().ByNumber(num) - if fd == nil || !fd.IsWeak() { - break - } - messageName := fd.Message().FullName() - messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName) - switch err { - case nil: - vi.mi, _ = messageType.(*MessageInfo) - case protoregistry.NotFound: - vi.typ = validationTypeBytes - default: - return out, ValidationUnknown - } - } break } // Possible extension field. 
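
Note: the protobuf-go hunks above, together with the deletion of internal/impl/weak.go that follows, remove runtime support for proto1 "weak" fields: the ProtoLegacyWeak flag is gone and FieldDescriptor.IsWeak is deprecated and always reports false. A hedged sketch of what that means for reflection-based callers; structpb.Struct is used purely as a stand-in message, assuming protobuf-go v1.36.6 as vendored here.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/structpb"
)

// describeFields walks a message descriptor. With weak-field support removed,
// fd.IsWeak() always returns false and fd.Message() never yields a weak
// placeholder, so the special case that older code kept for unlinked weak
// referents can simply be dropped.
func describeFields(md protoreflect.MessageDescriptor) {
	fields := md.Fields()
	for i := 0; i < fields.Len(); i++ {
		fd := fields.Get(i)
		// Previously: if fd.IsWeak() { ...handle placeholder Message()... }
		fmt.Printf("%s kind=%v weak=%v\n", fd.Name(), fd.Kind(), fd.IsWeak())
	}
}

func main() {
	describeFields((&structpb.Struct{}).ProtoReflect().Descriptor())
}
```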
diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/weak.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/weak.go deleted file mode 100644 index eb79a7ba9..000000000 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/impl/weak.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -// weakFields adds methods to the exported WeakFields type for internal use. -// -// The exported type is an alias to an unnamed type, so methods can't be -// defined directly on it. -type weakFields WeakFields - -func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) { - m, ok := w[int32(num)] - return m, ok -} - -func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) { - if *w == nil { - *w = make(weakFields) - } - (*w)[int32(num)] = m -} - -func (w *weakFields) clear(num protoreflect.FieldNumber) { - delete(*w, int32(num)) -} - -func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool { - _, ok := w[int32(num)] - return ok -} - -func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) { - delete(*w, int32(num)) -} - -func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage { - if m, ok := w[int32(num)]; ok { - return m - } - mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) - if mt == nil { - panic(fmt.Sprintf("message %v for weak field is not linked in", name)) - } - return mt.Zero().Interface() -} - -func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) { - if m != nil { - mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) - if mt == nil { - panic(fmt.Sprintf("message %v for weak field is not linked in", name)) - } - if mt != m.ProtoReflect().Type() { - panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface())) - } - } - if m == nil || !m.ProtoReflect().IsValid() { - delete(*w, int32(num)) - return - } - if *w == nil { - *w = make(weakFields) - } - (*w)[int32(num)] = m -} diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go similarity index 99% rename from src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go rename to src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go index 1ffddf687..42dd6f70c 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build go1.21 - package strs import ( diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go deleted file mode 100644 index 832a7988f..000000000 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.21 - -package strs - -import ( - "unsafe" - - "google.golang.org/protobuf/reflect/protoreflect" -) - -type ( - stringHeader struct { - Data unsafe.Pointer - Len int - } - sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int - } -) - -// UnsafeString returns an unsafe string reference of b. -// The caller must treat the input slice as immutable. -// -// WARNING: Use carefully. The returned result must not leak to the end user -// unless the input slice is provably immutable. -func UnsafeString(b []byte) (s string) { - src := (*sliceHeader)(unsafe.Pointer(&b)) - dst := (*stringHeader)(unsafe.Pointer(&s)) - dst.Data = src.Data - dst.Len = src.Len - return s -} - -// UnsafeBytes returns an unsafe bytes slice reference of s. -// The caller must treat returned slice as immutable. -// -// WARNING: Use carefully. The returned result must not leak to the end user. -func UnsafeBytes(s string) (b []byte) { - src := (*stringHeader)(unsafe.Pointer(&s)) - dst := (*sliceHeader)(unsafe.Pointer(&b)) - dst.Data = src.Data - dst.Len = src.Len - dst.Cap = src.Len - return b -} - -// Builder builds a set of strings with shared lifetime. -// This differs from strings.Builder, which is for building a single string. -type Builder struct { - buf []byte -} - -// AppendFullName is equivalent to protoreflect.FullName.Append, -// but optimized for large batches where each name has a shared lifetime. -func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName { - n := len(prefix) + len(".") + len(name) - if len(prefix) == 0 { - n -= len(".") - } - sb.grow(n) - sb.buf = append(sb.buf, prefix...) - sb.buf = append(sb.buf, '.') - sb.buf = append(sb.buf, name...) - return protoreflect.FullName(sb.last(n)) -} - -// MakeString is equivalent to string(b), but optimized for large batches -// with a shared lifetime. -func (sb *Builder) MakeString(b []byte) string { - sb.grow(len(b)) - sb.buf = append(sb.buf, b...) - return sb.last(len(b)) -} - -func (sb *Builder) grow(n int) { - if cap(sb.buf)-len(sb.buf) >= n { - return - } - - // Unlike strings.Builder, we do not need to copy over the contents - // of the old buffer since our builder provides no API for - // retrieving previously created strings. 
- sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n)) -} - -func (sb *Builder) last(n int) string { - return UnsafeString(sb.buf[len(sb.buf)-n:]) -} diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/version/version.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/version/version.go index f5c06280f..aac1cb18a 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 3 + Patch = 6 PreRelease = "" ) diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/proto/decode.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/proto/decode.go index e28d7acb3..4cbf1aeaf 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/proto/decode.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/proto/decode.go @@ -8,7 +8,6 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" @@ -172,10 +171,6 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) var err error if fd == nil { err = errUnknown - } else if flags.ProtoLegacyWeak { - if fd.IsWeak() && fd.Message().IsPlaceholder() { - err = errUnknown // weak referent is not linked in - } } // Parse the field value. diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/proto/merge.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/proto/merge.go index 3c6fe5780..ef55b97dd 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/proto/merge.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/proto/merge.go @@ -59,6 +59,12 @@ func Clone(m Message) Message { return dst.Interface() } +// CloneOf returns a deep copy of m. If the top-level message is invalid, +// it returns an invalid message as well. +func CloneOf[M Message](m M) M { + return Clone(m).(M) +} + // mergeOptions provides a namespace for merge functions, and can be // exported in the future if we add user-visible merge options. 
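
Note: the proto/merge.go hunk above adds a generic CloneOf alongside Clone. A minimal usage sketch, with structpb.Value used only as a stand-in generated message: CloneOf preserves the concrete message type, so the type assertion that Clone requires disappears.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	src := structpb.NewStringValue("hello")

	// proto.Clone returns proto.Message, so the caller asserts the type.
	dst1 := proto.Clone(src).(*structpb.Value)

	// proto.CloneOf, added in the protobuf-go version vendored here, keeps
	// the concrete type and needs no assertion.
	dst2 := proto.CloneOf(src)

	fmt.Println(dst1.GetStringValue(), dst2.GetStringValue())
}
```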
type mergeOptions struct{} diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index 69a050509..823dbf3ba 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -132,17 +132,11 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot } f.L2.Imports[i].IsPublic = true } - for _, i := range fd.GetWeakDependency() { - if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak { - return nil, errors.New("invalid or duplicate weak import index: %d", i) - } - f.L2.Imports[i].IsWeak = true - } imps := importSet{f.Path(): true} for i, path := range fd.GetDependency() { imp := &f.L2.Imports[i] f, err := r.FindFileByPath(path) - if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) { + if err == protoregistry.NotFound && o.AllowUnresolvable { f = filedesc.PlaceholderFile(path) } else if err != nil { return nil, errors.New("could not resolve import %q: %v", path, err) diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index ebcb4a8ab..9da34998b 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -149,7 +149,6 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if opts := fd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } - f.L1.IsWeak = opts.GetWeak() f.L1.IsLazy = opts.GetLazy() if opts.Packed != nil { f.L1.EditionFeatures.IsPacked = opts.GetPacked() diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index f3cebab29..ff692436e 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -43,7 +43,7 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc o.L1.Fields.List = append(o.L1.Fields.List, f) } - if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { + if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName())); err != nil { return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) } if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { @@ -73,10 +73,10 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) { for i, xd := range xds { x := &xs[i] - if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil { + if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee())); err != nil { return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err) } - if x.L1.Kind, 
x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil { + if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName())); err != nil { return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err) } if xd.DefaultValue != nil { @@ -95,11 +95,11 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc s := &ss[i] for j, md := range sd.GetMethod() { m := &s.L2.Methods.List[j] - m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false) + m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType())) if err != nil { return errors.New("service method %q cannot resolve input: %v", m.FullName(), err) } - m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false) + m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType())) if err != nil { return errors.New("service method %q cannot resolve output: %v", m.FullName(), err) } @@ -111,16 +111,16 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc // findTarget finds an enum or message descriptor if k is an enum, message, // group, or unknown. If unknown, and the name could be resolved, the kind // returned kind is set based on the type of the resolved descriptor. -func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { +func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { switch k { case protoreflect.EnumKind: - ed, err := r.findEnumDescriptor(scope, ref, isWeak) + ed, err := r.findEnumDescriptor(scope, ref) if err != nil { return 0, nil, nil, err } return k, ed, nil, nil case protoreflect.MessageKind, protoreflect.GroupKind: - md, err := r.findMessageDescriptor(scope, ref, isWeak) + md, err := r.findMessageDescriptor(scope, ref) if err != nil { return 0, nil, nil, err } @@ -129,7 +129,7 @@ func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, // Handle unspecified kinds (possible with parsers that operate // on a per-file basis without knowledge of dependencies). 
d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil } else if err == protoregistry.NotFound { return 0, nil, nil, errors.New("%q not found", ref.FullName()) @@ -206,9 +206,9 @@ func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) } } -func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) { +func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.EnumDescriptor, error) { d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return filedesc.PlaceholderEnum(ref.FullName()), nil } else if err == protoregistry.NotFound { return nil, errors.New("%q not found", ref.FullName()) @@ -222,9 +222,9 @@ func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialNa return ed, nil } -func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) { +func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.MessageDescriptor, error) { d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return filedesc.PlaceholderMessage(ref.FullName()), nil } else if err == protoregistry.NotFound { return nil, errors.New("%q not found", ref.FullName()) diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 5eaf65217..c343d9227 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -149,12 +149,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) } } - if f.IsWeak() && !flags.ProtoLegacyWeak { - return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) - } - if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { - return errors.New("message field %q may only be weak for an optional message", f.FullName()) - } if f.IsPacked() && !isPackable(f) { return errors.New("message field %q is not packable", f.FullName()) } @@ -199,9 +193,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds if f.Cardinality() != protoreflect.Optional { return errors.New("message field %q belongs in a oneof and must be optional", f.FullName()) } - if f.IsWeak() { - return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName()) - } } } @@ -254,9 +245,6 @@ func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xd return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) } } - if xd.GetOptions().GetWeak() { - return errors.New("extension field %q cannot be a weak 
reference", x.FullName()) - } if x.IsPacked() && !isPackable(x) { return errors.New("extension field %q is not packable", x.FullName()) } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index f55b03695..697a61b29 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -11,6 +11,7 @@ import ( "google.golang.org/protobuf/internal/editiondefaults" "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" @@ -128,23 +129,39 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp // We must not use proto.GetExtension(child, gofeaturespb.E_Go) // because that only works for messages we generated, but not for // dynamicpb messages. See golang/protobuf#1669. + // + // Further, we harden this code against adversarial inputs: a + // service which accepts descriptors from a possibly malicious + // source shouldn't crash. goFeatures := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor()) if !goFeatures.IsValid() { return parentFS } + gf, ok := goFeatures.Interface().(protoreflect.Message) + if !ok { + return parentFS + } // gf.Interface() could be *dynamicpb.Message or *gofeaturespb.GoFeatures. - gf := goFeatures.Message() fields := gf.Descriptor().Fields() - if fd := fields.ByName("legacy_unmarshal_json_enum"); gf.Has(fd) { + if fd := fields.ByNumber(genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.BoolKind && + gf.Has(fd) { parentFS.GenerateLegacyUnmarshalJSON = gf.Get(fd).Bool() } - if fd := fields.ByName("strip_enum_prefix"); gf.Has(fd) { + if fd := fields.ByNumber(genid.GoFeatures_StripEnumPrefix_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.EnumKind && + gf.Has(fd) { parentFS.StripEnumPrefix = int(gf.Get(fd).Enum()) } - if fd := fields.ByName("api_level"); gf.Has(fd) { + if fd := fields.ByNumber(genid.GoFeatures_ApiLevel_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.EnumKind && + gf.Has(fd) { parentFS.APILevel = int(gf.Get(fd).Enum()) } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index a5de8d400..9b880aa8c 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -32,9 +32,6 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD if imp.IsPublic { p.PublicDependency = append(p.PublicDependency, int32(i)) } - if imp.IsWeak { - p.WeakDependency = append(p.WeakDependency, int32(i)) - } } for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ { loc := locs.Get(i) diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go index ea154eec4..a4a0a2971 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go +++ 
b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go @@ -398,6 +398,8 @@ func (p *SourcePath) appendFeatureSet(b []byte) []byte { b = p.appendSingularField(b, "message_encoding", nil) case 6: b = p.appendSingularField(b, "json_format", nil) + case 7: + b = p.appendSingularField(b, "enforce_naming_style", nil) } return b } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index cd8fadbaf..cd7fbc87a 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -68,7 +68,7 @@ type Descriptor interface { // dependency is not resolved, in which case only name information is known. // // Placeholder types may only be returned by the following accessors - // as a result of unresolved dependencies or weak imports: + // as a result of unresolved dependencies: // // ╔═══════════════════════════════════╤═════════════════════╗ // ║ Accessor │ Descriptor ║ @@ -168,11 +168,7 @@ type FileImport struct { // The current file and the imported file must be within proto package. IsPublic bool - // IsWeak reports whether this is a weak import, which does not impose - // a direct dependency on the target file. - // - // Weak imports are a legacy proto1 feature. Equivalent behavior is - // achieved using proto2 extension fields or proto3 Any messages. + // Deprecated: support for weak fields has been removed. IsWeak bool } @@ -325,9 +321,7 @@ type FieldDescriptor interface { // specified in the source .proto file. HasOptionalKeyword() bool - // IsWeak reports whether this is a weak field, which does not impose a - // direct dependency on the target type. - // If true, then Message returns a placeholder type. + // Deprecated: support for weak fields has been removed. IsWeak() bool // IsPacked reports whether repeated primitive numeric kinds should be diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go similarity index 99% rename from src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go rename to src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go index 479527b58..fe17f3722 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.21 - package protoreflect import ( diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go deleted file mode 100644 index 0015fcb35..000000000 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.21 - -package protoreflect - -import ( - "unsafe" - - "google.golang.org/protobuf/internal/pragma" -) - -type ( - stringHeader struct { - Data unsafe.Pointer - Len int - } - sliceHeader struct { - Data unsafe.Pointer - Len int - Cap int - } - ifaceHeader struct { - Type unsafe.Pointer - Data unsafe.Pointer - } -) - -var ( - nilType = typeOf(nil) - boolType = typeOf(*new(bool)) - int32Type = typeOf(*new(int32)) - int64Type = typeOf(*new(int64)) - uint32Type = typeOf(*new(uint32)) - uint64Type = typeOf(*new(uint64)) - float32Type = typeOf(*new(float32)) - float64Type = typeOf(*new(float64)) - stringType = typeOf(*new(string)) - bytesType = typeOf(*new([]byte)) - enumType = typeOf(*new(EnumNumber)) -) - -// typeOf returns a pointer to the Go type information. -// The pointer is comparable and equal if and only if the types are identical. -func typeOf(t any) unsafe.Pointer { - return (*ifaceHeader)(unsafe.Pointer(&t)).Type -} - -// value is a union where only one type can be represented at a time. -// The struct is 24B large on 64-bit systems and requires the minimum storage -// necessary to represent each possible type. -// -// The Go GC needs to be able to scan variables containing pointers. -// As such, pointers and non-pointers cannot be intermixed. -type value struct { - pragma.DoNotCompare // 0B - - // typ stores the type of the value as a pointer to the Go type. - typ unsafe.Pointer // 8B - - // ptr stores the data pointer for a String, Bytes, or interface value. - ptr unsafe.Pointer // 8B - - // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or - // Enum value as a raw uint64. - // - // It is also used to store the length of a String or Bytes value; - // the capacity is ignored. - num uint64 // 8B -} - -func valueOfString(v string) Value { - p := (*stringHeader)(unsafe.Pointer(&v)) - return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))} -} -func valueOfBytes(v []byte) Value { - p := (*sliceHeader)(unsafe.Pointer(&v)) - return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} -} -func valueOfIface(v any) Value { - p := (*ifaceHeader)(unsafe.Pointer(&v)) - return Value{typ: p.Type, ptr: p.Data} -} - -func (v Value) getString() (x string) { - *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)} - return x -} -func (v Value) getBytes() (x []byte) { - *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} - return x -} -func (v Value) getIface() (x any) { - *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} - return x -} diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index a551e7ae9..7fe280f19 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -46,6 +46,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // The full set of known editions. 
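
Note: descriptorpb is regenerated below with a new FeatureSet.EnforceNamingStyle enum (field number 7). As the editions.go hunk earlier states, the feature is enforced by protoc and Go does nothing with it, but the generated constants and helpers become available to descriptor-handling code. A small sketch using only what the regenerated code shown in this diff provides; nothing beyond the enum itself is assumed.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	style := descriptorpb.FeatureSet_STYLE2024

	// String() and Number() come from the regenerated descriptor.pb.go;
	// Enum() returns a pointer suitable for optional option fields.
	fmt.Println(style.String(), style.Number()) // STYLE2024 1
	_ = style.Enum()

	// The generated name/value maps for the new enum are also available.
	fmt.Println(descriptorpb.FeatureSet_EnforceNamingStyle_name[2]) // STYLE_LEGACY
}
```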
@@ -1138,6 +1139,65 @@ func (FeatureSet_JsonFormat) EnumDescriptor() ([]byte, []int) { return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5} } +type FeatureSet_EnforceNamingStyle int32 + +const ( + FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN FeatureSet_EnforceNamingStyle = 0 + FeatureSet_STYLE2024 FeatureSet_EnforceNamingStyle = 1 + FeatureSet_STYLE_LEGACY FeatureSet_EnforceNamingStyle = 2 +) + +// Enum value maps for FeatureSet_EnforceNamingStyle. +var ( + FeatureSet_EnforceNamingStyle_name = map[int32]string{ + 0: "ENFORCE_NAMING_STYLE_UNKNOWN", + 1: "STYLE2024", + 2: "STYLE_LEGACY", + } + FeatureSet_EnforceNamingStyle_value = map[string]int32{ + "ENFORCE_NAMING_STYLE_UNKNOWN": 0, + "STYLE2024": 1, + "STYLE_LEGACY": 2, + } +) + +func (x FeatureSet_EnforceNamingStyle) Enum() *FeatureSet_EnforceNamingStyle { + p := new(FeatureSet_EnforceNamingStyle) + *p = x + return p +} + +func (x FeatureSet_EnforceNamingStyle) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor { + return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() +} + +func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType { + return &file_google_protobuf_descriptor_proto_enumTypes[16] +} + +func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Do not use. +func (x *FeatureSet_EnforceNamingStyle) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = FeatureSet_EnforceNamingStyle(num) + return nil +} + +// Deprecated: Use FeatureSet_EnforceNamingStyle.Descriptor instead. +func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) { + return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6} +} + // Represents the identified object's effect on the element in the original // .proto file. type GeneratedCodeInfo_Annotation_Semantic int32 @@ -1176,11 +1236,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string { } func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor { - return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor() + return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor() } func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType { - return &file_google_protobuf_descriptor_proto_enumTypes[16] + return &file_google_protobuf_descriptor_proto_enumTypes[17] } func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber { @@ -1276,8 +1336,14 @@ type FileDescriptorProto struct { // The supported values are "proto2", "proto3", and "editions". // // If `edition` is present, this value must be "editions". + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` // The edition of the proto file. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. 
Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -2211,6 +2277,9 @@ type FileOptions struct { // determining the ruby package. RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. // See the documentation for the "Options" section above. @@ -2481,6 +2550,9 @@ type MessageOptions struct { // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -2647,6 +2719,9 @@ type FieldOptions struct { Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"` EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"` FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"` // The parser stores options it doesn't recognize here. See above. @@ -2798,6 +2873,9 @@ func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { type OneofOptions struct { state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -2870,6 +2948,9 @@ type EnumOptions struct { // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto. DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -2957,6 +3038,9 @@ type EnumValueOptions struct { // this is a formalization for deprecating enum values. Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"` // Indicate that fields annotated with this enum value should not be printed // out when using debug formats, e.g. when the field contains sensitive @@ -3045,6 +3129,9 @@ func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { type ServiceOptions struct { state protoimpl.MessageState `protogen:"open.v1"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"` // Is this service deprecated? // Depending on the target platform, this can emit Deprecated annotations @@ -3123,6 +3210,9 @@ type MethodOptions struct { Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` // Any features defined in the specific edition. + // WARNING: This field should only be used by protobuf plugins or special + // cases like the proto compiler. Other uses are discouraged and + // developers should rely on the protoreflect APIs for their client language. Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` @@ -3309,6 +3399,7 @@ type FeatureSet struct { Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"` MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"` JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"` + EnforceNamingStyle *FeatureSet_EnforceNamingStyle `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"` extensionFields protoimpl.ExtensionFields unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -3386,6 +3477,13 @@ func (x *FeatureSet) GetJsonFormat() FeatureSet_JsonFormat { return FeatureSet_JSON_FORMAT_UNKNOWN } +func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle { + if x != nil && x.EnforceNamingStyle != nil { + return *x.EnforceNamingStyle + } + return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN +} + // A compiled specification for the defaults of a set of features. These // messages are generated from FeatureSet extensions and can be used to seed // feature resolution. The resolution with this object becomes a simple search @@ -4360,791 +4458,381 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor -var file_google_protobuf_descriptor_proto_rawDesc = []byte{ - 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, - 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, - 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, - 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, - 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03, 0x28, - 
0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, - 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65, - 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65, - 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07, - 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63, - 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, - 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, - 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 
0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, - 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65, - 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, - 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64, - 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, - 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, - 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, - 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 
0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, - 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37, - 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, - 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64, - 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, - 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, - 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a, - 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02, - 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x1a, 0x94, - 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, - 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a, - 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45, - 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55, - 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e, - 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, - 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65, - 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b, - 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a, - 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, - 
0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a, - 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f, - 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, - 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, - 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, - 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, - 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, - 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a, - 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a, - 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d, - 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a, - 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f, - 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, - 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54, - 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, - 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12, - 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, - 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, - 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e, - 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, - 0xe3, 0x02, 0x0a, 0x13, 
0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, - 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, - 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, - 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, - 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, - 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d, - 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b, - 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16, - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, - 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65, - 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, - 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 
0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, - 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a, - 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, - 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, - 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74, - 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61, - 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d, - 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61, - 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a, - 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65, - 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14, - 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, - 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48, - 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 
0x73, 0x74, 0x72, 0x69, - 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20, - 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, - 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12, - 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, - 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, - 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b, - 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, - 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, - 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61, - 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, - 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65, - 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01, - 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e, - 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, - 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, - 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65, - 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73, - 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, - 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72, - 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77, - 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, - 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73, - 
0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16, - 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68, - 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, - 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61, - 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, - 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69, - 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45, - 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45, - 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, - 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, - 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70, - 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, - 0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, - 0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, - 0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 
0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, - 0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, - 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, - 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, - 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, - 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, - 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, - 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, - 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, - 0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, - 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, - 0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, - 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, - 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, - 0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, - 0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, - 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, - 0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 
0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, - 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, - 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, - 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, - 0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, - 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, - 0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, - 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, - 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a, - 0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, - 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a, - 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 
0x61, 0x75, 0x6c, 0x74, - 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a, - 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75, - 0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72, - 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77, - 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, - 0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, - 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, - 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, - 0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, - 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, - 0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, - 0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, - 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, - 0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, - 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, - 0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, - 
0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, - 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, - 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, - 0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, - 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, - 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, - 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, - 0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, - 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, - 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, - 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, - 0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, - 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, - 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, - 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, - 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, - 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, - 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, - 0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, - 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, - 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, - 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, - 0x02, 0x18, 0x01, 0x52, 
0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, - 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, - 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, - 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, - 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10, - 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, - 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, - 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, - 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, - 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, - 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, - 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, - 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99, - 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, - 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70, - 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, - 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, - 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, - 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, - 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, - 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a, - 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, - 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59, - 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, - 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12, - 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 
0x54, 0x10, 0x02, 0x2a, - 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55, - 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, - 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61, - 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, - 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67, - 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e, - 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f, - 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65, - 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, - 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65, - 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, - 0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, - 
0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e, - 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, - 0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, - 0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12, - 0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, - 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, - 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88, - 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, - 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, - 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65, - 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, - 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2, - 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, - 0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, - 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, - 0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, - 0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x53, 
0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, - 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, - 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, - 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73, - 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, - 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45, - 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, - 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, - 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10, - 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55, - 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, - 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45, - 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22, - 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, - 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, - 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, - 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46, - 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, - 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, - 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01, - 0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, - 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, - 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, - 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, - 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, - 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, - 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, - 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, - 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, - 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 
0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, - 0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, - 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, - 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, - 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, - 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, - 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, - 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65, - 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, - 0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, - 0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78, - 0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d, - 0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08, - 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, - 0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 
0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, - 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, - 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, - 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, - 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, - 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, - 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08, - 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11, - 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, - 0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, - 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, - 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, - 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, - 0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, - 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, - 0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, - 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, - 0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xa7, - 
0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, - 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43, - 0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, - 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, - 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, - 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, - 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, - 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, - 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, - 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, - 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, - 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, - 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, - 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, - 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, - 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, - 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, - 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, - 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, - 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, - 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, -} +const file_google_protobuf_descriptor_proto_rawDesc = "" + + "\n" + + " google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" + + "\x11FileDescriptorSet\x128\n" + + "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\x98\x05\n" + + "\x13FileDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" + + "\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" + + "\n" + + "dependency\x18\x03 \x03(\tR\n" + + "dependency\x12+\n" + + "\x11public_dependency\x18\n" + + " \x03(\x05R\x10publicDependency\x12'\n" + + "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12C\n" + + "\fmessage_type\x18\x04 \x03(\v2 .google.protobuf.DescriptorProtoR\vmessageType\x12A\n" + + "\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" + + "\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" + + 
"\textension\x18\a \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x126\n" + + "\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" + + "\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" + + "\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" + + "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xb9\x06\n" + + "\x0fDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12;\n" + + "\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" + + "\textension\x18\x06 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x12A\n" + + "\vnested_type\x18\x03 \x03(\v2 .google.protobuf.DescriptorProtoR\n" + + "nestedType\x12A\n" + + "\tenum_type\x18\x04 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12X\n" + + "\x0fextension_range\x18\x05 \x03(\v2/.google.protobuf.DescriptorProto.ExtensionRangeR\x0eextensionRange\x12D\n" + + "\n" + + "oneof_decl\x18\b \x03(\v2%.google.protobuf.OneofDescriptorProtoR\toneofDecl\x129\n" + + "\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" + + "\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" + + "\rreserved_name\x18\n" + + " \x03(\tR\freservedName\x1az\n" + + "\x0eExtensionRange\x12\x14\n" + + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" + + "\aoptions\x18\x03 \x01(\v2&.google.protobuf.ExtensionRangeOptionsR\aoptions\x1a7\n" + + "\rReservedRange\x12\x14\n" + + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\x05R\x03end\"\xcc\x04\n" + + "\x15ExtensionRangeOptions\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x12Y\n" + + "\vdeclaration\x18\x02 \x03(\v22.google.protobuf.ExtensionRangeOptions.DeclarationB\x03\x88\x01\x02R\vdeclaration\x127\n" + + "\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12m\n" + + "\fverification\x18\x03 \x01(\x0e28.google.protobuf.ExtensionRangeOptions.VerificationState:\n" + + "UNVERIFIEDB\x03\x88\x01\x02R\fverification\x1a\x94\x01\n" + + "\vDeclaration\x12\x16\n" + + "\x06number\x18\x01 \x01(\x05R\x06number\x12\x1b\n" + + "\tfull_name\x18\x02 \x01(\tR\bfullName\x12\x12\n" + + "\x04type\x18\x03 \x01(\tR\x04type\x12\x1a\n" + + "\breserved\x18\x05 \x01(\bR\breserved\x12\x1a\n" + + "\brepeated\x18\x06 \x01(\bR\brepeatedJ\x04\b\x04\x10\x05\"4\n" + + "\x11VerificationState\x12\x0f\n" + + "\vDECLARATION\x10\x00\x12\x0e\n" + + "\n" + + "UNVERIFIED\x10\x01*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xc1\x06\n" + + "\x14FieldDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" + + "\x06number\x18\x03 \x01(\x05R\x06number\x12A\n" + + "\x05label\x18\x04 \x01(\x0e2+.google.protobuf.FieldDescriptorProto.LabelR\x05label\x12>\n" + + "\x04type\x18\x05 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\x04type\x12\x1b\n" + + "\ttype_name\x18\x06 \x01(\tR\btypeName\x12\x1a\n" + + "\bextendee\x18\x02 \x01(\tR\bextendee\x12#\n" + + "\rdefault_value\x18\a \x01(\tR\fdefaultValue\x12\x1f\n" + + "\voneof_index\x18\t \x01(\x05R\n" + + "oneofIndex\x12\x1b\n" + + "\tjson_name\x18\n" + + " \x01(\tR\bjsonName\x127\n" + + "\aoptions\x18\b \x01(\v2\x1d.google.protobuf.FieldOptionsR\aoptions\x12'\n" + + "\x0fproto3_optional\x18\x11 \x01(\bR\x0eproto3Optional\"\xb6\x02\n" + + "\x04Type\x12\x0f\n" + + "\vTYPE_DOUBLE\x10\x01\x12\x0e\n" + + "\n" + + 
"TYPE_FLOAT\x10\x02\x12\x0e\n" + + "\n" + + "TYPE_INT64\x10\x03\x12\x0f\n" + + "\vTYPE_UINT64\x10\x04\x12\x0e\n" + + "\n" + + "TYPE_INT32\x10\x05\x12\x10\n" + + "\fTYPE_FIXED64\x10\x06\x12\x10\n" + + "\fTYPE_FIXED32\x10\a\x12\r\n" + + "\tTYPE_BOOL\x10\b\x12\x0f\n" + + "\vTYPE_STRING\x10\t\x12\x0e\n" + + "\n" + + "TYPE_GROUP\x10\n" + + "\x12\x10\n" + + "\fTYPE_MESSAGE\x10\v\x12\x0e\n" + + "\n" + + "TYPE_BYTES\x10\f\x12\x0f\n" + + "\vTYPE_UINT32\x10\r\x12\r\n" + + "\tTYPE_ENUM\x10\x0e\x12\x11\n" + + "\rTYPE_SFIXED32\x10\x0f\x12\x11\n" + + "\rTYPE_SFIXED64\x10\x10\x12\x0f\n" + + "\vTYPE_SINT32\x10\x11\x12\x0f\n" + + "\vTYPE_SINT64\x10\x12\"C\n" + + "\x05Label\x12\x12\n" + + "\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n" + + "\x0eLABEL_REPEATED\x10\x03\x12\x12\n" + + "\x0eLABEL_REQUIRED\x10\x02\"c\n" + + "\x14OneofDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x127\n" + + "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xe3\x02\n" + + "\x13EnumDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12?\n" + + "\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" + + "\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" + + "\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" + + "\rreserved_name\x18\x05 \x03(\tR\freservedName\x1a;\n" + + "\x11EnumReservedRange\x12\x14\n" + + "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" + + "\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" + + "\x18EnumValueDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" + + "\x06number\x18\x02 \x01(\x05R\x06number\x12;\n" + + "\aoptions\x18\x03 \x01(\v2!.google.protobuf.EnumValueOptionsR\aoptions\"\xa7\x01\n" + + "\x16ServiceDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12>\n" + + "\x06method\x18\x02 \x03(\v2&.google.protobuf.MethodDescriptorProtoR\x06method\x129\n" + + "\aoptions\x18\x03 \x01(\v2\x1f.google.protobuf.ServiceOptionsR\aoptions\"\x89\x02\n" + + "\x15MethodDescriptorProto\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n" + + "\n" + + "input_type\x18\x02 \x01(\tR\tinputType\x12\x1f\n" + + "\voutput_type\x18\x03 \x01(\tR\n" + + "outputType\x128\n" + + "\aoptions\x18\x04 \x01(\v2\x1e.google.protobuf.MethodOptionsR\aoptions\x120\n" + + "\x10client_streaming\x18\x05 \x01(\b:\x05falseR\x0fclientStreaming\x120\n" + + "\x10server_streaming\x18\x06 \x01(\b:\x05falseR\x0fserverStreaming\"\xad\t\n" + + "\vFileOptions\x12!\n" + + "\fjava_package\x18\x01 \x01(\tR\vjavaPackage\x120\n" + + "\x14java_outer_classname\x18\b \x01(\tR\x12javaOuterClassname\x125\n" + + "\x13java_multiple_files\x18\n" + + " \x01(\b:\x05falseR\x11javaMultipleFiles\x12D\n" + + "\x1djava_generate_equals_and_hash\x18\x14 \x01(\bB\x02\x18\x01R\x19javaGenerateEqualsAndHash\x12:\n" + + "\x16java_string_check_utf8\x18\x1b \x01(\b:\x05falseR\x13javaStringCheckUtf8\x12S\n" + + "\foptimize_for\x18\t \x01(\x0e2).google.protobuf.FileOptions.OptimizeMode:\x05SPEEDR\voptimizeFor\x12\x1d\n" + + "\n" + + "go_package\x18\v \x01(\tR\tgoPackage\x125\n" + + "\x13cc_generic_services\x18\x10 \x01(\b:\x05falseR\x11ccGenericServices\x129\n" + + "\x15java_generic_services\x18\x11 \x01(\b:\x05falseR\x13javaGenericServices\x125\n" + + "\x13py_generic_services\x18\x12 \x01(\b:\x05falseR\x11pyGenericServices\x12%\n" + + "\n" + + "deprecated\x18\x17 \x01(\b:\x05falseR\n" + + "deprecated\x12.\n" + + "\x10cc_enable_arenas\x18\x1f 
\x01(\b:\x04trueR\x0eccEnableArenas\x12*\n" + + "\x11objc_class_prefix\x18$ \x01(\tR\x0fobjcClassPrefix\x12)\n" + + "\x10csharp_namespace\x18% \x01(\tR\x0fcsharpNamespace\x12!\n" + + "\fswift_prefix\x18' \x01(\tR\vswiftPrefix\x12(\n" + + "\x10php_class_prefix\x18( \x01(\tR\x0ephpClassPrefix\x12#\n" + + "\rphp_namespace\x18) \x01(\tR\fphpNamespace\x124\n" + + "\x16php_metadata_namespace\x18, \x01(\tR\x14phpMetadataNamespace\x12!\n" + + "\fruby_package\x18- \x01(\tR\vrubyPackage\x127\n" + + "\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\":\n" + + "\fOptimizeMode\x12\t\n" + + "\x05SPEED\x10\x01\x12\r\n" + + "\tCODE_SIZE\x10\x02\x12\x10\n" + + "\fLITE_RUNTIME\x10\x03*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b*\x10+J\x04\b&\x10'R\x14php_generic_services\"\xf4\x03\n" + + "\x0eMessageOptions\x12<\n" + + "\x17message_set_wire_format\x18\x01 \x01(\b:\x05falseR\x14messageSetWireFormat\x12L\n" + + "\x1fno_standard_descriptor_accessor\x18\x02 \x01(\b:\x05falseR\x1cnoStandardDescriptorAccessor\x12%\n" + + "\n" + + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + + "deprecated\x12\x1b\n" + + "\tmap_entry\x18\a \x01(\bR\bmapEntry\x12V\n" + + "&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" + + "\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" + + "\"\x9d\r\n" + + "\fFieldOptions\x12A\n" + + "\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" + + "\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" + + "\x06jstype\x18\x06 \x01(\x0e2$.google.protobuf.FieldOptions.JSType:\tJS_NORMALR\x06jstype\x12\x19\n" + + "\x04lazy\x18\x05 \x01(\b:\x05falseR\x04lazy\x12.\n" + + "\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" + + "\n" + + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + + "deprecated\x12\x19\n" + + "\x04weak\x18\n" + + " \x01(\b:\x05falseR\x04weak\x12(\n" + + "\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" + + "\tretention\x18\x11 \x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" + + "\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" + + "\x10edition_defaults\x18\x14 \x03(\v2,.google.protobuf.FieldOptions.EditionDefaultR\x0feditionDefaults\x127\n" + + "\bfeatures\x18\x15 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12U\n" + + "\x0ffeature_support\x18\x16 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x1aZ\n" + + "\x0eEditionDefault\x122\n" + + "\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value\x1a\x96\x02\n" + + "\x0eFeatureSupport\x12G\n" + + "\x12edition_introduced\x18\x01 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionIntroduced\x12G\n" + + "\x12edition_deprecated\x18\x02 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionDeprecated\x12/\n" + + "\x13deprecation_warning\x18\x03 \x01(\tR\x12deprecationWarning\x12A\n" + + "\x0fedition_removed\x18\x04 
\x01(\x0e2\x18.google.protobuf.EditionR\x0eeditionRemoved\"/\n" + + "\x05CType\x12\n" + + "\n" + + "\x06STRING\x10\x00\x12\b\n" + + "\x04CORD\x10\x01\x12\x10\n" + + "\fSTRING_PIECE\x10\x02\"5\n" + + "\x06JSType\x12\r\n" + + "\tJS_NORMAL\x10\x00\x12\r\n" + + "\tJS_STRING\x10\x01\x12\r\n" + + "\tJS_NUMBER\x10\x02\"U\n" + + "\x0fOptionRetention\x12\x15\n" + + "\x11RETENTION_UNKNOWN\x10\x00\x12\x15\n" + + "\x11RETENTION_RUNTIME\x10\x01\x12\x14\n" + + "\x10RETENTION_SOURCE\x10\x02\"\x8c\x02\n" + + "\x10OptionTargetType\x12\x17\n" + + "\x13TARGET_TYPE_UNKNOWN\x10\x00\x12\x14\n" + + "\x10TARGET_TYPE_FILE\x10\x01\x12\x1f\n" + + "\x1bTARGET_TYPE_EXTENSION_RANGE\x10\x02\x12\x17\n" + + "\x13TARGET_TYPE_MESSAGE\x10\x03\x12\x15\n" + + "\x11TARGET_TYPE_FIELD\x10\x04\x12\x15\n" + + "\x11TARGET_TYPE_ONEOF\x10\x05\x12\x14\n" + + "\x10TARGET_TYPE_ENUM\x10\x06\x12\x1a\n" + + "\x16TARGET_TYPE_ENUM_ENTRY\x10\a\x12\x17\n" + + "\x13TARGET_TYPE_SERVICE\x10\b\x12\x16\n" + + "\x12TARGET_TYPE_METHOD\x10\t*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x12\x10\x13\"\xac\x01\n" + + "\fOneofOptions\x127\n" + + "\bfeatures\x18\x01 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd1\x02\n" + + "\vEnumOptions\x12\x1f\n" + + "\vallow_alias\x18\x02 \x01(\bR\n" + + "allowAlias\x12%\n" + + "\n" + + "deprecated\x18\x03 \x01(\b:\x05falseR\n" + + "deprecated\x12V\n" + + "&deprecated_legacy_json_field_conflicts\x18\x06 \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" + + "\bfeatures\x18\a \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x05\x10\x06\"\xd8\x02\n" + + "\x10EnumValueOptions\x12%\n" + + "\n" + + "deprecated\x18\x01 \x01(\b:\x05falseR\n" + + "deprecated\x127\n" + + "\bfeatures\x18\x02 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12(\n" + + "\fdebug_redact\x18\x03 \x01(\b:\x05falseR\vdebugRedact\x12U\n" + + "\x0ffeature_support\x18\x04 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd5\x01\n" + + "\x0eServiceOptions\x127\n" + + "\bfeatures\x18\" \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12%\n" + + "\n" + + "deprecated\x18! \x01(\b:\x05falseR\n" + + "deprecated\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x99\x03\n" + + "\rMethodOptions\x12%\n" + + "\n" + + "deprecated\x18! 
\x01(\b:\x05falseR\n" + + "deprecated\x12q\n" + + "\x11idempotency_level\x18\" \x01(\x0e2/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWNR\x10idempotencyLevel\x127\n" + + "\bfeatures\x18# \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" + + "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"P\n" + + "\x10IdempotencyLevel\x12\x17\n" + + "\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n" + + "\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n" + + "\n" + + "IDEMPOTENT\x10\x02*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x9a\x03\n" + + "\x13UninterpretedOption\x12A\n" + + "\x04name\x18\x02 \x03(\v2-.google.protobuf.UninterpretedOption.NamePartR\x04name\x12)\n" + + "\x10identifier_value\x18\x03 \x01(\tR\x0fidentifierValue\x12,\n" + + "\x12positive_int_value\x18\x04 \x01(\x04R\x10positiveIntValue\x12,\n" + + "\x12negative_int_value\x18\x05 \x01(\x03R\x10negativeIntValue\x12!\n" + + "\fdouble_value\x18\x06 \x01(\x01R\vdoubleValue\x12!\n" + + "\fstring_value\x18\a \x01(\fR\vstringValue\x12'\n" + + "\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" + + "\bNamePart\x12\x1b\n" + + "\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" + + "\fis_extension\x18\x02 \x02(\bR\visExtension\"\xae\f\n" + + "\n" + + "FeatureSet\x12\x91\x01\n" + + "\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" + + "\tenum_type\x18\x02 \x01(\x0e2$.google.protobuf.FeatureSet.EnumTypeB)\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\v\x12\x06CLOSED\x18\x84\a\xa2\x01\t\x12\x04OPEN\x18\xe7\a\xb2\x01\x03\b\xe8\aR\benumType\x12\x98\x01\n" + + "\x17repeated_field_encoding\x18\x03 \x01(\x0e21.google.protobuf.FeatureSet.RepeatedFieldEncodingB-\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPANDED\x18\x84\a\xa2\x01\v\x12\x06PACKED\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x15repeatedFieldEncoding\x12~\n" + + "\x0futf8_validation\x18\x04 \x01(\x0e2*.google.protobuf.FeatureSet.Utf8ValidationB)\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\t\x12\x04NONE\x18\x84\a\xa2\x01\v\x12\x06VERIFY\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x0eutf8Validation\x12~\n" + + "\x10message_encoding\x18\x05 \x01(\x0e2+.google.protobuf.FeatureSet.MessageEncodingB&\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\x14\x12\x0fLENGTH_PREFIXED\x18\x84\a\xb2\x01\x03\b\xe8\aR\x0fmessageEncoding\x12\x82\x01\n" + + "\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" + + "\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" + + "jsonFormat\x12\xab\x01\n" + + "\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\"\\\n" + + "\rFieldPresence\x12\x1a\n" + + "\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" + + "\bEXPLICIT\x10\x01\x12\f\n" + + "\bIMPLICIT\x10\x02\x12\x13\n" + + "\x0fLEGACY_REQUIRED\x10\x03\"7\n" + + "\bEnumType\x12\x15\n" + + "\x11ENUM_TYPE_UNKNOWN\x10\x00\x12\b\n" + + "\x04OPEN\x10\x01\x12\n" + + "\n" + + "\x06CLOSED\x10\x02\"V\n" + + "\x15RepeatedFieldEncoding\x12#\n" + + 
"\x1fREPEATED_FIELD_ENCODING_UNKNOWN\x10\x00\x12\n" + + "\n" + + "\x06PACKED\x10\x01\x12\f\n" + + "\bEXPANDED\x10\x02\"I\n" + + "\x0eUtf8Validation\x12\x1b\n" + + "\x17UTF8_VALIDATION_UNKNOWN\x10\x00\x12\n" + + "\n" + + "\x06VERIFY\x10\x02\x12\b\n" + + "\x04NONE\x10\x03\"\x04\b\x01\x10\x01\"S\n" + + "\x0fMessageEncoding\x12\x1c\n" + + "\x18MESSAGE_ENCODING_UNKNOWN\x10\x00\x12\x13\n" + + "\x0fLENGTH_PREFIXED\x10\x01\x12\r\n" + + "\tDELIMITED\x10\x02\"H\n" + + "\n" + + "JsonFormat\x12\x17\n" + + "\x13JSON_FORMAT_UNKNOWN\x10\x00\x12\t\n" + + "\x05ALLOW\x10\x01\x12\x16\n" + + "\x12LEGACY_BEST_EFFORT\x10\x02\"W\n" + + "\x12EnforceNamingStyle\x12 \n" + + "\x1cENFORCE_NAMING_STYLE_UNKNOWN\x10\x00\x12\r\n" + + "\tSTYLE2024\x10\x01\x12\x10\n" + + "\fSTYLE_LEGACY\x10\x02*\x06\b\xe8\a\x10\x8bN*\x06\b\x8bN\x10\x90N*\x06\b\x90N\x10\x91NJ\x06\b\xe7\a\x10\xe8\a\"\xef\x03\n" + + "\x12FeatureSetDefaults\x12X\n" + + "\bdefaults\x18\x01 \x03(\v2<.google.protobuf.FeatureSetDefaults.FeatureSetEditionDefaultR\bdefaults\x12A\n" + + "\x0fminimum_edition\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eminimumEdition\x12A\n" + + "\x0fmaximum_edition\x18\x05 \x01(\x0e2\x18.google.protobuf.EditionR\x0emaximumEdition\x1a\xf8\x01\n" + + "\x18FeatureSetEditionDefault\x122\n" + + "\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12N\n" + + "\x14overridable_features\x18\x04 \x01(\v2\x1b.google.protobuf.FeatureSetR\x13overridableFeatures\x12B\n" + + "\x0efixed_features\x18\x05 \x01(\v2\x1b.google.protobuf.FeatureSetR\rfixedFeaturesJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03R\bfeatures\"\xb5\x02\n" + + "\x0eSourceCodeInfo\x12D\n" + + "\blocation\x18\x01 \x03(\v2(.google.protobuf.SourceCodeInfo.LocationR\blocation\x1a\xce\x01\n" + + "\bLocation\x12\x16\n" + + "\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x16\n" + + "\x04span\x18\x02 \x03(\x05B\x02\x10\x01R\x04span\x12)\n" + + "\x10leading_comments\x18\x03 \x01(\tR\x0fleadingComments\x12+\n" + + "\x11trailing_comments\x18\x04 \x01(\tR\x10trailingComments\x12:\n" + + "\x19leading_detached_comments\x18\x06 \x03(\tR\x17leadingDetachedComments*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xd0\x02\n" + + "\x11GeneratedCodeInfo\x12M\n" + + "\n" + + "annotation\x18\x01 \x03(\v2-.google.protobuf.GeneratedCodeInfo.AnnotationR\n" + + "annotation\x1a\xeb\x01\n" + + "\n" + + "Annotation\x12\x16\n" + + "\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x1f\n" + + "\vsource_file\x18\x02 \x01(\tR\n" + + "sourceFile\x12\x14\n" + + "\x05begin\x18\x03 \x01(\x05R\x05begin\x12\x10\n" + + "\x03end\x18\x04 \x01(\x05R\x03end\x12R\n" + + "\bsemantic\x18\x05 \x01(\x0e26.google.protobuf.GeneratedCodeInfo.Annotation.SemanticR\bsemantic\"(\n" + + "\bSemantic\x12\b\n" + + "\x04NONE\x10\x00\x12\a\n" + + "\x03SET\x10\x01\x12\t\n" + + "\x05ALIAS\x10\x02*\xa7\x02\n" + + "\aEdition\x12\x13\n" + + "\x0fEDITION_UNKNOWN\x10\x00\x12\x13\n" + + "\x0eEDITION_LEGACY\x10\x84\a\x12\x13\n" + + "\x0eEDITION_PROTO2\x10\xe6\a\x12\x13\n" + + "\x0eEDITION_PROTO3\x10\xe7\a\x12\x11\n" + + "\fEDITION_2023\x10\xe8\a\x12\x11\n" + + "\fEDITION_2024\x10\xe9\a\x12\x17\n" + + "\x13EDITION_1_TEST_ONLY\x10\x01\x12\x17\n" + + "\x13EDITION_2_TEST_ONLY\x10\x02\x12\x1d\n" + + "\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" + + "\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" + + "\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" + + "\vEDITION_MAX\x10\xff\xff\xff\xff\aB~\n" + + 
"\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection" var ( file_google_protobuf_descriptor_proto_rawDescOnce sync.Once - file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc + file_google_protobuf_descriptor_proto_rawDescData []byte ) func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() { - file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData) + file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc))) }) return file_google_protobuf_descriptor_proto_rawDescData } -var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17) +var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 18) var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33) var file_google_protobuf_descriptor_proto_goTypes = []any{ (Edition)(0), // 0: google.protobuf.Edition @@ -5163,124 +4851,126 @@ var file_google_protobuf_descriptor_proto_goTypes = []any{ (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat - (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic - (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet - (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto - (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto - (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions - (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto - (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto - (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto - (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto - (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto - (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto - (*FileOptions)(nil), // 27: google.protobuf.FileOptions - (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions - (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions - (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions - (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions - (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions - (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions - (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions - (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption - (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet - (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults - (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo - (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo - (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange - (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange - (*ExtensionRangeOptions_Declaration)(nil), // 42: 
google.protobuf.ExtensionRangeOptions.Declaration - (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange - (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault - (*FieldOptions_FeatureSupport)(nil), // 45: google.protobuf.FieldOptions.FeatureSupport - (*UninterpretedOption_NamePart)(nil), // 46: google.protobuf.UninterpretedOption.NamePart - (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - (*SourceCodeInfo_Location)(nil), // 48: google.protobuf.SourceCodeInfo.Location - (*GeneratedCodeInfo_Annotation)(nil), // 49: google.protobuf.GeneratedCodeInfo.Annotation + (FeatureSet_EnforceNamingStyle)(0), // 16: google.protobuf.FeatureSet.EnforceNamingStyle + (GeneratedCodeInfo_Annotation_Semantic)(0), // 17: google.protobuf.GeneratedCodeInfo.Annotation.Semantic + (*FileDescriptorSet)(nil), // 18: google.protobuf.FileDescriptorSet + (*FileDescriptorProto)(nil), // 19: google.protobuf.FileDescriptorProto + (*DescriptorProto)(nil), // 20: google.protobuf.DescriptorProto + (*ExtensionRangeOptions)(nil), // 21: google.protobuf.ExtensionRangeOptions + (*FieldDescriptorProto)(nil), // 22: google.protobuf.FieldDescriptorProto + (*OneofDescriptorProto)(nil), // 23: google.protobuf.OneofDescriptorProto + (*EnumDescriptorProto)(nil), // 24: google.protobuf.EnumDescriptorProto + (*EnumValueDescriptorProto)(nil), // 25: google.protobuf.EnumValueDescriptorProto + (*ServiceDescriptorProto)(nil), // 26: google.protobuf.ServiceDescriptorProto + (*MethodDescriptorProto)(nil), // 27: google.protobuf.MethodDescriptorProto + (*FileOptions)(nil), // 28: google.protobuf.FileOptions + (*MessageOptions)(nil), // 29: google.protobuf.MessageOptions + (*FieldOptions)(nil), // 30: google.protobuf.FieldOptions + (*OneofOptions)(nil), // 31: google.protobuf.OneofOptions + (*EnumOptions)(nil), // 32: google.protobuf.EnumOptions + (*EnumValueOptions)(nil), // 33: google.protobuf.EnumValueOptions + (*ServiceOptions)(nil), // 34: google.protobuf.ServiceOptions + (*MethodOptions)(nil), // 35: google.protobuf.MethodOptions + (*UninterpretedOption)(nil), // 36: google.protobuf.UninterpretedOption + (*FeatureSet)(nil), // 37: google.protobuf.FeatureSet + (*FeatureSetDefaults)(nil), // 38: google.protobuf.FeatureSetDefaults + (*SourceCodeInfo)(nil), // 39: google.protobuf.SourceCodeInfo + (*GeneratedCodeInfo)(nil), // 40: google.protobuf.GeneratedCodeInfo + (*DescriptorProto_ExtensionRange)(nil), // 41: google.protobuf.DescriptorProto.ExtensionRange + (*DescriptorProto_ReservedRange)(nil), // 42: google.protobuf.DescriptorProto.ReservedRange + (*ExtensionRangeOptions_Declaration)(nil), // 43: google.protobuf.ExtensionRangeOptions.Declaration + (*EnumDescriptorProto_EnumReservedRange)(nil), // 44: google.protobuf.EnumDescriptorProto.EnumReservedRange + (*FieldOptions_EditionDefault)(nil), // 45: google.protobuf.FieldOptions.EditionDefault + (*FieldOptions_FeatureSupport)(nil), // 46: google.protobuf.FieldOptions.FeatureSupport + (*UninterpretedOption_NamePart)(nil), // 47: google.protobuf.UninterpretedOption.NamePart + (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 48: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + (*SourceCodeInfo_Location)(nil), // 49: google.protobuf.SourceCodeInfo.Location + (*GeneratedCodeInfo_Annotation)(nil), // 50: google.protobuf.GeneratedCodeInfo.Annotation } var file_google_protobuf_descriptor_proto_depIdxs = []int32{ - 18, // 
0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto - 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto - 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto - 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions - 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo + 19, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto + 20, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto + 24, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 26, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto + 22, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 28, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions + 39, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo 0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition - 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto - 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto - 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto - 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto - 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange - 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto - 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions - 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange - 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration - 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet + 22, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto + 22, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto + 20, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto + 24, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto + 41, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange + 23, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto + 29, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions + 42, // 15: google.protobuf.DescriptorProto.reserved_range:type_name 
-> google.protobuf.DescriptorProto.ReservedRange + 36, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 43, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration + 37, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type - 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions - 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions - 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto - 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions - 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange - 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions - 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto - 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions - 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions + 30, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions + 31, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions + 25, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto + 32, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions + 44, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange + 33, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions + 27, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto + 34, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions + 35, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode - 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> 
google.protobuf.UninterpretedOption 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType - 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault - 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet - 45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet - 45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport - 35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 45, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault + 37, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet + 46, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 36, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet + 46, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport + 36, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 37, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption 9, // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel - 36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet - 35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption - 46, // 56: 
google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart + 37, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet + 36, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption + 47, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart 10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence 11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType 12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding 13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation 14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding 15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat - 47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault - 0, // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition - 0, // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition - 48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location - 49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation - 20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions - 0, // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition - 0, // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition - 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition - 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition - 0, // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition - 36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet - 36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet - 16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic - 77, // [77:77] is the sub-list for method output_type - 77, // [77:77] is the sub-list for method input_type - 77, // [77:77] is the sub-list for extension type_name - 77, // [77:77] is the sub-list for extension extendee - 0, // [0:77] is the sub-list for field type_name + 16, // 63: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle + 48, // 64: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault + 0, // 65: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition + 0, // 66: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition + 49, // 67: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location + 50, // 68: 
google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation + 21, // 69: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions + 0, // 70: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition + 0, // 71: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition + 0, // 72: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition + 0, // 73: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition + 0, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition + 37, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet + 37, // 76: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet + 17, // 77: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic + 78, // [78:78] is the sub-list for method output_type + 78, // [78:78] is the sub-list for method input_type + 78, // [78:78] is the sub-list for extension type_name + 78, // [78:78] is the sub-list for extension extendee + 0, // [0:78] is the sub-list for field type_name } func init() { file_google_protobuf_descriptor_proto_init() } @@ -5292,8 +4982,8 @@ func file_google_protobuf_descriptor_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, - NumEnums: 17, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)), + NumEnums: 18, NumMessages: 33, NumExtensions: 0, NumServices: 0, @@ -5304,7 +4994,6 @@ func file_google_protobuf_descriptor_proto_init() { MessageInfos: file_google_protobuf_descriptor_proto_msgTypes, }.Build() File_google_protobuf_descriptor_proto = out.File - file_google_protobuf_descriptor_proto_rawDesc = nil file_google_protobuf_descriptor_proto_goTypes = nil file_google_protobuf_descriptor_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index e0b72eaf9..37e712b6b 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -16,6 +16,7 @@ import ( descriptorpb "google.golang.org/protobuf/types/descriptorpb" reflect "reflect" sync "sync" + unsafe "unsafe" ) type GoFeatures_APILevel int32 @@ -227,72 +228,38 @@ var ( var File_google_protobuf_go_features_proto protoreflect.FileDescriptor -var file_google_protobuf_go_features_proto_rawDesc = []byte{ - 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x05, 0x0a, 
0x0a, 0x47, 0x6f, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67, - 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73, - 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01, - 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, - 0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, - 0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65, - 0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70, - 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, - 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61, - 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, - 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x74, 0x0a, 0x09, 0x61, 0x70, 0x69, - 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x70, - 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x41, 0x50, 0x49, - 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x3e, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x01, - 0xa2, 0x01, 0x1a, 0x12, 0x15, 0x41, 0x50, 0x49, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, - 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0f, - 0x12, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, 0x18, 0xe9, 0x07, 0xb2, - 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, 0x61, 0x70, 0x69, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, - 0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72, - 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e, - 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70, - 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98, - 0x01, 0x06, 0x98, 0x01, 0x07, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52, - 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, - 0x45, 0x45, 0x50, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74, - 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x53, 0x0a, - 0x08, 0x41, 0x50, 0x49, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x50, 0x49, - 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, - 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x45, 0x4e, - 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x48, 0x59, 0x42, 0x52, 0x49, 0x44, - 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, - 0x10, 0x03, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, - 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, - 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x55, 0x4e, 0x53, 0x50, - 0x45, 
0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x52, - 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b, - 0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45, - 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52, - 0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54, - 0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, - 0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, - 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, -} +const file_google_protobuf_go_features_proto_rawDesc = "" + + "\n" + + "!google/protobuf/go_features.proto\x12\x02pb\x1a google/protobuf/descriptor.proto\"\xab\x05\n" + + "\n" + + "GoFeatures\x12\xbe\x01\n" + + "\x1alegacy_unmarshal_json_enum\x18\x01 \x01(\bB\x80\x01\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\t\x12\x04true\x18\x84\a\xa2\x01\n" + + "\x12\x05false\x18\xe7\a\xb2\x01[\b\xe8\a\x10\xe8\a\x1aSThe legacy UnmarshalJSON API is deprecated and will be removed in a future edition.R\x17legacyUnmarshalJsonEnum\x12t\n" + + "\tapi_level\x18\x02 \x01(\x0e2\x17.pb.GoFeatures.APILevelB>\x88\x01\x01\x98\x01\x03\x98\x01\x01\xa2\x01\x1a\x12\x15API_LEVEL_UNSPECIFIED\x18\x84\a\xa2\x01\x0f\x12\n" + + "API_OPAQUE\x18\xe9\a\xb2\x01\x03\b\xe8\aR\bapiLevel\x12|\n" + + "\x11strip_enum_prefix\x18\x03 \x01(\x0e2\x1e.pb.GoFeatures.StripEnumPrefixB0\x88\x01\x01\x98\x01\x06\x98\x01\a\x98\x01\x01\xa2\x01\x1b\x12\x16STRIP_ENUM_PREFIX_KEEP\x18\x84\a\xb2\x01\x03\b\xe9\aR\x0fstripEnumPrefix\"S\n" + + "\bAPILevel\x12\x19\n" + + "\x15API_LEVEL_UNSPECIFIED\x10\x00\x12\f\n" + + "\bAPI_OPEN\x10\x01\x12\x0e\n" + + "\n" + + "API_HYBRID\x10\x02\x12\x0e\n" + + "\n" + + "API_OPAQUE\x10\x03\"\x92\x01\n" + + "\x0fStripEnumPrefix\x12!\n" + + "\x1dSTRIP_ENUM_PREFIX_UNSPECIFIED\x10\x00\x12\x1a\n" + + "\x16STRIP_ENUM_PREFIX_KEEP\x10\x01\x12#\n" + + "\x1fSTRIP_ENUM_PREFIX_GENERATE_BOTH\x10\x02\x12\x1b\n" + + "\x17STRIP_ENUM_PREFIX_STRIP\x10\x03:<\n" + + "\x02go\x12\x1b.google.protobuf.FeatureSet\x18\xea\a \x01(\v2\x0e.pb.GoFeaturesR\x02goB/Z-google.golang.org/protobuf/types/gofeaturespb" var ( file_google_protobuf_go_features_proto_rawDescOnce sync.Once - file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc + file_google_protobuf_go_features_proto_rawDescData []byte ) func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { file_google_protobuf_go_features_proto_rawDescOnce.Do(func() { - file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData) + file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), 
len(file_google_protobuf_go_features_proto_rawDesc))) }) return file_google_protobuf_go_features_proto_rawDescData } @@ -326,7 +293,7 @@ func file_google_protobuf_go_features_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)), NumEnums: 2, NumMessages: 1, NumExtensions: 1, @@ -339,7 +306,6 @@ func file_google_protobuf_go_features_proto_init() { ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, }.Build() File_google_protobuf_go_features_proto = out.File - file_google_protobuf_go_features_proto_rawDesc = nil file_google_protobuf_go_features_proto_goTypes = nil file_google_protobuf_go_features_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 191552cce..1ff0d1494 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -122,6 +122,7 @@ import ( reflect "reflect" strings "strings" sync "sync" + unsafe "unsafe" ) // `Any` contains an arbitrary serialized protocol buffer message along with a @@ -411,32 +412,22 @@ func (x *Any) GetValue() []byte { var File_google_protobuf_any_proto protoreflect.FileDescriptor -var file_google_protobuf_any_proto_rawDesc = []byte{ - 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, - 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, - 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, - 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, -} +const file_google_protobuf_any_proto_rawDesc = "" + + "\n" + + "\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"6\n" + + "\x03Any\x12\x19\n" + + "\btype_url\x18\x01 \x01(\tR\atypeUrl\x12\x14\n" + + "\x05value\x18\x02 \x01(\fR\x05valueBv\n" + + "\x13com.google.protobufB\bAnyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_any_proto_rawDescOnce sync.Once - 
file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc + file_google_protobuf_any_proto_rawDescData []byte ) func file_google_protobuf_any_proto_rawDescGZIP() []byte { file_google_protobuf_any_proto_rawDescOnce.Do(func() { - file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData) + file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc))) }) return file_google_protobuf_any_proto_rawDescData } @@ -462,7 +453,7 @@ func file_google_protobuf_any_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_any_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -473,7 +464,6 @@ func file_google_protobuf_any_proto_init() { MessageInfos: file_google_protobuf_any_proto_msgTypes, }.Build() File_google_protobuf_any_proto = out.File - file_google_protobuf_any_proto_rawDesc = nil file_google_protobuf_any_proto_goTypes = nil file_google_protobuf_any_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 34d76e6cd..ca2e7b38f 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -80,6 +80,7 @@ import ( reflect "reflect" sync "sync" time "time" + unsafe "unsafe" ) // A Duration represents a signed, fixed-length span of time represented @@ -288,33 +289,22 @@ func (x *Duration) GetNanos() int32 { var File_google_protobuf_duration_proto protoreflect.FileDescriptor -var file_google_protobuf_duration_proto_rawDesc = []byte{ - 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01, - 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, - 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 
0x6e, 0x54, 0x79, - 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_google_protobuf_duration_proto_rawDesc = "" + + "\n" + + "\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\":\n" + + "\bDuration\x12\x18\n" + + "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" + + "\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x83\x01\n" + + "\x13com.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_duration_proto_rawDescOnce sync.Once - file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc + file_google_protobuf_duration_proto_rawDescData []byte ) func file_google_protobuf_duration_proto_rawDescGZIP() []byte { file_google_protobuf_duration_proto_rawDescOnce.Do(func() { - file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData) + file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc))) }) return file_google_protobuf_duration_proto_rawDescData } @@ -340,7 +330,7 @@ func file_google_protobuf_duration_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_duration_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -351,7 +341,6 @@ func file_google_protobuf_duration_proto_init() { MessageInfos: file_google_protobuf_duration_proto_msgTypes, }.Build() File_google_protobuf_duration_proto = out.File - file_google_protobuf_duration_proto_rawDesc = nil file_google_protobuf_duration_proto_goTypes = nil file_google_protobuf_duration_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index 7fcdd382c..1d7ee3b47 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -38,6 +38,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // A generic empty message that you can re-use to avoid defining duplicated @@ -85,29 +86,21 @@ func (*Empty) Descriptor() ([]byte, []int) { var File_google_protobuf_empty_proto protoreflect.FileDescriptor -var file_google_protobuf_empty_proto_rawDesc = []byte{ - 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07, - 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 
0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, - 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, - 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, - 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_google_protobuf_empty_proto_rawDesc = "" + + "\n" + + "\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\a\n" + + "\x05EmptyB}\n" + + "\x13com.google.protobufB\n" + + "EmptyProtoP\x01Z.google.golang.org/protobuf/types/known/emptypb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_empty_proto_rawDescOnce sync.Once - file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc + file_google_protobuf_empty_proto_rawDescData []byte ) func file_google_protobuf_empty_proto_rawDescGZIP() []byte { file_google_protobuf_empty_proto_rawDescOnce.Do(func() { - file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData) + file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_empty_proto_rawDesc), len(file_google_protobuf_empty_proto_rawDesc))) }) return file_google_protobuf_empty_proto_rawDescData } @@ -133,7 +126,7 @@ func file_google_protobuf_empty_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_empty_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_empty_proto_rawDesc), len(file_google_protobuf_empty_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -144,7 +137,6 @@ func file_google_protobuf_empty_proto_init() { MessageInfos: file_google_protobuf_empty_proto_msgTypes, }.Build() File_google_protobuf_empty_proto = out.File - file_google_protobuf_empty_proto_rawDesc = nil file_google_protobuf_empty_proto_goTypes = nil file_google_protobuf_empty_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index e5d7da38c..91ee89a5c 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -83,6 +83,7 @@ import ( sort "sort" strings "strings" sync "sync" + unsafe "unsafe" ) // `FieldMask` represents a set of symbolic field paths, for example: @@ -503,32 +504,21 @@ func (x *FieldMask) GetPaths() []string { var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor -var file_google_protobuf_field_mask_proto_rawDesc = []byte{ - 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, - 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 
0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, - 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, - 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, - 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61, - 0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, - 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_google_protobuf_field_mask_proto_rawDesc = "" + + "\n" + + " google/protobuf/field_mask.proto\x12\x0fgoogle.protobuf\"!\n" + + "\tFieldMask\x12\x14\n" + + "\x05paths\x18\x01 \x03(\tR\x05pathsB\x85\x01\n" + + "\x13com.google.protobufB\x0eFieldMaskProtoP\x01Z2google.golang.org/protobuf/types/known/fieldmaskpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_field_mask_proto_rawDescOnce sync.Once - file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc + file_google_protobuf_field_mask_proto_rawDescData []byte ) func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() { - file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData) + file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc))) }) return file_google_protobuf_field_mask_proto_rawDescData } @@ -554,7 +544,7 @@ func file_google_protobuf_field_mask_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -565,7 +555,6 @@ func file_google_protobuf_field_mask_proto_init() { MessageInfos: file_google_protobuf_field_mask_proto_msgTypes, }.Build() File_google_protobuf_field_mask_proto = out.File - file_google_protobuf_field_mask_proto_rawDesc = nil file_google_protobuf_field_mask_proto_goTypes = nil file_google_protobuf_field_mask_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index f2c53ea33..30411b728 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -128,6 +128,7 @@ import ( reflect "reflect" sync "sync" utf8 "unicode/utf8" + unsafe "unsafe" ) // `NullValue` is a singleton enumeration to represent the null value for the @@ -671,64 +672,40 @@ func (x *ListValue) GetValues() []*Value { var File_google_protobuf_struct_proto protoreflect.FileDescriptor -var 
file_google_protobuf_struct_proto_rawDesc = []byte{ - 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, - 0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69, - 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, - 0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, - 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62, - 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, - 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c, - 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69, - 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, - 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, - 0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2e, 0x0a, 0x06, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 
0x73, 0x2a, 0x1b, 0x0a, 0x09, - 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c, - 0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, - 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62, - 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, - 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -} +const file_google_protobuf_struct_proto_rawDesc = "" + + "\n" + + "\x1cgoogle/protobuf/struct.proto\x12\x0fgoogle.protobuf\"\x98\x01\n" + + "\x06Struct\x12;\n" + + "\x06fields\x18\x01 \x03(\v2#.google.protobuf.Struct.FieldsEntryR\x06fields\x1aQ\n" + + "\vFieldsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12,\n" + + "\x05value\x18\x02 \x01(\v2\x16.google.protobuf.ValueR\x05value:\x028\x01\"\xb2\x02\n" + + "\x05Value\x12;\n" + + "\n" + + "null_value\x18\x01 \x01(\x0e2\x1a.google.protobuf.NullValueH\x00R\tnullValue\x12#\n" + + "\fnumber_value\x18\x02 \x01(\x01H\x00R\vnumberValue\x12#\n" + + "\fstring_value\x18\x03 \x01(\tH\x00R\vstringValue\x12\x1f\n" + + "\n" + + "bool_value\x18\x04 \x01(\bH\x00R\tboolValue\x12<\n" + + "\fstruct_value\x18\x05 \x01(\v2\x17.google.protobuf.StructH\x00R\vstructValue\x12;\n" + + "\n" + + "list_value\x18\x06 \x01(\v2\x1a.google.protobuf.ListValueH\x00R\tlistValueB\x06\n" + + "\x04kind\";\n" + + "\tListValue\x12.\n" + + "\x06values\x18\x01 \x03(\v2\x16.google.protobuf.ValueR\x06values*\x1b\n" + + "\tNullValue\x12\x0e\n" + + "\n" + + "NULL_VALUE\x10\x00B\x7f\n" + + "\x13com.google.protobufB\vStructProtoP\x01Z/google.golang.org/protobuf/types/known/structpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_struct_proto_rawDescOnce sync.Once - file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc + file_google_protobuf_struct_proto_rawDescData []byte ) func file_google_protobuf_struct_proto_rawDescGZIP() []byte { file_google_protobuf_struct_proto_rawDescOnce.Do(func() { - file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData) + file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc))) }) return file_google_protobuf_struct_proto_rawDescData } @@ -773,7 +750,7 @@ func file_google_protobuf_struct_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_struct_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc)), NumEnums: 1, NumMessages: 4, NumExtensions: 0, @@ -785,7 +762,6 @@ func file_google_protobuf_struct_proto_init() { MessageInfos: 
file_google_protobuf_struct_proto_msgTypes, }.Build() File_google_protobuf_struct_proto = out.File - file_google_protobuf_struct_proto_rawDesc = nil file_google_protobuf_struct_proto_goTypes = nil file_google_protobuf_struct_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 9550109aa..06d584c14 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -78,6 +78,7 @@ import ( reflect "reflect" sync "sync" time "time" + unsafe "unsafe" ) // A Timestamp represents a point in time independent of any time zone or local @@ -297,33 +298,22 @@ func (x *Timestamp) GetNanos() int32 { var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor -var file_google_protobuf_timestamp_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, - 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, - 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, - 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, - 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01, - 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, - 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +const file_google_protobuf_timestamp_proto_rawDesc = "" + + "\n" + + "\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\";\n" + + "\tTimestamp\x12\x18\n" + + "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" + + "\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x85\x01\n" + + "\x13com.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3" var ( file_google_protobuf_timestamp_proto_rawDescOnce sync.Once - file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc + file_google_protobuf_timestamp_proto_rawDescData []byte ) func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() { - file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData) + 
file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc))) }) return file_google_protobuf_timestamp_proto_rawDescData } @@ -349,7 +339,7 @@ func file_google_protobuf_timestamp_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -360,7 +350,6 @@ func file_google_protobuf_timestamp_proto_init() { MessageInfos: file_google_protobuf_timestamp_proto_msgTypes, }.Build() File_google_protobuf_timestamp_proto = out.File - file_google_protobuf_timestamp_proto_rawDesc = nil file_google_protobuf_timestamp_proto_goTypes = nil file_google_protobuf_timestamp_proto_depIdxs = nil } diff --git a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index 15b424ec1..b7c2d0607 100644 --- a/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/src/cmd/linuxkit/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -28,10 +28,17 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // -// Wrappers for primitive (non-message) types. These types are useful -// for embedding primitives in the `google.protobuf.Any` type and for places -// where we need to distinguish between the absence of a primitive -// typed field and its default value. +// Wrappers for primitive (non-message) types. These types were needed +// for legacy reasons and are not recommended for use in new APIs. +// +// Historically these wrappers were useful to have presence on proto3 primitive +// fields, but proto3 syntax has been updated to support the `optional` keyword. +// Using that keyword is now the strongly preferred way to add presence to +// proto3 primitive fields. +// +// A secondary usecase was to embed primitives in the `google.protobuf.Any` +// type: it is now recommended that you embed your value in your own wrapper +// message which can be specifically documented. // // These wrappers have no meaningful use within repeated fields as they lack // the ability to detect presence on individual elements. @@ -48,11 +55,15 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // Wrapper message for `double`. // // The JSON representation for `DoubleValue` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type DoubleValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The double value. @@ -106,6 +117,9 @@ func (x *DoubleValue) GetValue() float64 { // Wrapper message for `float`. // // The JSON representation for `FloatValue` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type FloatValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The float value. @@ -159,6 +173,9 @@ func (x *FloatValue) GetValue() float32 { // Wrapper message for `int64`. 
// // The JSON representation for `Int64Value` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type Int64Value struct { state protoimpl.MessageState `protogen:"open.v1"` // The int64 value. @@ -212,6 +229,9 @@ func (x *Int64Value) GetValue() int64 { // Wrapper message for `uint64`. // // The JSON representation for `UInt64Value` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type UInt64Value struct { state protoimpl.MessageState `protogen:"open.v1"` // The uint64 value. @@ -265,6 +285,9 @@ func (x *UInt64Value) GetValue() uint64 { // Wrapper message for `int32`. // // The JSON representation for `Int32Value` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type Int32Value struct { state protoimpl.MessageState `protogen:"open.v1"` // The int32 value. @@ -318,6 +341,9 @@ func (x *Int32Value) GetValue() int32 { // Wrapper message for `uint32`. // // The JSON representation for `UInt32Value` is JSON number. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type UInt32Value struct { state protoimpl.MessageState `protogen:"open.v1"` // The uint32 value. @@ -371,6 +397,9 @@ func (x *UInt32Value) GetValue() uint32 { // Wrapper message for `bool`. // // The JSON representation for `BoolValue` is JSON `true` and `false`. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type BoolValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The bool value. @@ -424,6 +453,9 @@ func (x *BoolValue) GetValue() bool { // Wrapper message for `string`. // // The JSON representation for `StringValue` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type StringValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The string value. @@ -477,6 +509,9 @@ func (x *StringValue) GetValue() string { // Wrapper message for `bytes`. // // The JSON representation for `BytesValue` is JSON string. +// +// Not recommended for use in new APIs, but still useful for legacy APIs and +// has no plan to be removed. type BytesValue struct { state protoimpl.MessageState `protogen:"open.v1"` // The bytes value. 
@@ -529,50 +564,41 @@ func (x *BytesValue) GetValue() []byte {
 
 var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_wrappers_proto_rawDesc = []byte{
-	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
-	0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52,
-	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56,
-	0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e,
-	0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
-	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23,
-	0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a,
-	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61,
-	0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75,
-	0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
-	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33,
-	0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09,
-	0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
-	0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22,
-	0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14,
-	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c,
-	0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
-	0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
-	0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
-	0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79,
-	0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
-	0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
-	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+const file_google_protobuf_wrappers_proto_rawDesc = "" +
+	"\n" +
+	"\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"#\n" +
+	"\vDoubleValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x01R\x05value\"\"\n" +
+	"\n" +
+	"FloatValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x02R\x05value\"\"\n" +
+	"\n" +
+	"Int64Value\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x03R\x05value\"#\n" +
+	"\vUInt64Value\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x04R\x05value\"\"\n" +
+	"\n" +
+	"Int32Value\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x05R\x05value\"#\n" +
+	"\vUInt32Value\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\rR\x05value\"!\n" +
+	"\tBoolValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\bR\x05value\"#\n" +
+	"\vStringValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\tR\x05value\"\"\n" +
+	"\n" +
+	"BytesValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\fR\x05valueB\x83\x01\n" +
+	"\x13com.google.protobufB\rWrappersProtoP\x01Z1google.golang.org/protobuf/types/known/wrapperspb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_wrappers_proto_rawDescOnce sync.Once
-	file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc
+	file_google_protobuf_wrappers_proto_rawDescData []byte
 )
 
 func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte {
 	file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() {
-		file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData)
+		file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc)))
 	})
 	return file_google_protobuf_wrappers_proto_rawDescData
 }
@@ -606,7 +632,7 @@ func file_google_protobuf_wrappers_proto_init() {
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc,
+			RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc)),
 			NumEnums:      0,
 			NumMessages:   9,
 			NumExtensions: 0,
@@ -617,7 +643,6 @@ func file_google_protobuf_wrappers_proto_init() {
 		MessageInfos:      file_google_protobuf_wrappers_proto_msgTypes,
 	}.Build()
 	File_google_protobuf_wrappers_proto = out.File
-	file_google_protobuf_wrappers_proto_rawDesc = nil
 	file_google_protobuf_wrappers_proto_goTypes = nil
 	file_google_protobuf_wrappers_proto_depIdxs = nil
 }
diff --git a/src/cmd/linuxkit/vendor/modules.txt b/src/cmd/linuxkit/vendor/modules.txt
index 7c60af855..8fdf95e6f 100644
--- a/src/cmd/linuxkit/vendor/modules.txt
+++ b/src/cmd/linuxkit/vendor/modules.txt
@@ -1,9 +1,6 @@
 # cloud.google.com/go/compute/metadata v0.6.0
 ## explicit; go 1.21
 cloud.google.com/go/compute/metadata
-# github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6
-## explicit; go 1.20
-github.com/AdaLogics/go-fuzz-headers
 # github.com/Azure/azure-sdk-for-go v56.3.0+incompatible
 ## explicit
 github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/storage/mgmt/storage
@@ -118,14 +115,14 @@ github.com/aws/aws-sdk-go/service/sso
 github.com/aws/aws-sdk-go/service/sso/ssoiface
 github.com/aws/aws-sdk-go/service/sts
 github.com/aws/aws-sdk-go/service/sts/stsiface
-# github.com/containerd/console v1.0.4
+# github.com/containerd/console v1.0.5
 ## explicit; go 1.13
 github.com/containerd/console
-# github.com/containerd/containerd/api v1.8.0
-## explicit; go 1.21
+# github.com/containerd/containerd/api v1.9.0
+## explicit; go 1.23.0
 github.com/containerd/containerd/api/services/content/v1
-# github.com/containerd/containerd/v2 v2.0.2
-## explicit; go 1.22.0
+# github.com/containerd/containerd/v2 v2.1.3
+## explicit; go 1.23.0
 github.com/containerd/containerd/v2/core/content
 github.com/containerd/containerd/v2/core/content/proxy
 github.com/containerd/containerd/v2/core/images
@@ -133,13 +130,13 @@ github.com/containerd/containerd/v2/core/leases
 github.com/containerd/containerd/v2/core/remotes
 github.com/containerd/containerd/v2/core/remotes/docker
 github.com/containerd/containerd/v2/core/remotes/docker/auth
-github.com/containerd/containerd/v2/core/remotes/docker/schema1
 github.com/containerd/containerd/v2/core/remotes/errors
+github.com/containerd/containerd/v2/core/transfer
 github.com/containerd/containerd/v2/defaults
 github.com/containerd/containerd/v2/internal/fsverity
+github.com/containerd/containerd/v2/internal/lazyregexp
 github.com/containerd/containerd/v2/internal/randutil
 github.com/containerd/containerd/v2/pkg/archive/compression
-github.com/containerd/containerd/v2/pkg/deprecation
 github.com/containerd/containerd/v2/pkg/filters
 github.com/containerd/containerd/v2/pkg/identifiers
 github.com/containerd/containerd/v2/pkg/kernelversion
@@ -161,6 +158,7 @@ github.com/containerd/errdefs
 # github.com/containerd/errdefs/pkg v0.3.0
 ## explicit; go 1.22
 github.com/containerd/errdefs/pkg/errgrpc
+github.com/containerd/errdefs/pkg/errhttp
 github.com/containerd/errdefs/pkg/internal/cause
 github.com/containerd/errdefs/pkg/internal/types
 # github.com/containerd/log v0.1.0
@@ -197,7 +195,7 @@ github.com/docker/buildx/util/logutil
 github.com/docker/buildx/util/metricutil
 github.com/docker/buildx/util/progress
 github.com/docker/buildx/version
-# github.com/docker/cli v28.0.0-rc.2+incompatible
+# github.com/docker/cli v28.2.2+incompatible
 ## explicit
 github.com/docker/cli/cli/config
 github.com/docker/cli/cli/config/configfile
@@ -209,11 +207,12 @@ github.com/docker/cli/cli/connhelper/ssh
 # github.com/docker/distribution v2.8.3+incompatible
 ## explicit
 github.com/docker/distribution/registry/client/auth/challenge
-# github.com/docker/docker v28.0.0-rc.2+incompatible
+# github.com/docker/docker v28.2.2+incompatible
 ## explicit
 github.com/docker/docker/api
 github.com/docker/docker/api/types
 github.com/docker/docker/api/types/blkiodev
+github.com/docker/docker/api/types/build
 github.com/docker/docker/api/types/checkpoint
 github.com/docker/docker/api/types/common
 github.com/docker/docker/api/types/container
@@ -232,11 +231,10 @@ github.com/docker/docker/api/types/time
 github.com/docker/docker/api/types/versions
 github.com/docker/docker/api/types/volume
 github.com/docker/docker/client
-github.com/docker/docker/errdefs
 github.com/docker/docker/internal/lazyregexp
 github.com/docker/docker/internal/multierror
-# github.com/docker/docker-credential-helpers v0.8.2
-## explicit; go 1.19
+# github.com/docker/docker-credential-helpers v0.9.3
+## explicit; go 1.21
 github.com/docker/docker-credential-helpers/client
 github.com/docker/docker-credential-helpers/credentials
 # github.com/docker/go-connections v0.5.0
@@ -278,8 +276,8 @@ github.com/golang/protobuf/jsonpb
 github.com/golang/protobuf/proto
 github.com/golang/protobuf/ptypes/any
 github.com/golang/protobuf/ptypes/timestamp
-# github.com/google/go-cmp v0.6.0
-## explicit; go 1.13
+# github.com/google/go-cmp v0.7.0
+## explicit; go 1.21
 github.com/google/go-cmp/cmp
 github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
@@ -379,8 +377,8 @@ github.com/gophercloud/utils/openstack/clientconfig
 # github.com/gorilla/websocket v1.5.0
 ## explicit; go 1.12
 github.com/gorilla/websocket
-# github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0
-## explicit; go 1.21
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1
+## explicit; go 1.22
 github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
 github.com/grpc-ecosystem/grpc-gateway/v2/runtime
 github.com/grpc-ecosystem/grpc-gateway/v2/utilities
@@ -396,25 +394,27 @@ github.com/hashicorp/go-multierror
 # github.com/hashicorp/go-version v1.7.0
 ## explicit
 github.com/hashicorp/go-version
-# github.com/in-toto/in-toto-golang v0.5.0
-## explicit; go 1.17
+# github.com/in-toto/in-toto-golang v0.9.0
+## explicit; go 1.20
 github.com/in-toto/in-toto-golang/in_toto
 github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common
 github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.1
 github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2
+github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1
 # github.com/inconshreveable/mousetrap v1.1.0
 ## explicit; go 1.18
 github.com/inconshreveable/mousetrap
 # github.com/jmespath/go-jmespath v0.4.0
 ## explicit; go 1.14
 github.com/jmespath/go-jmespath
-# github.com/klauspost/compress v1.17.11
-## explicit; go 1.21
+# github.com/klauspost/compress v1.18.0
+## explicit; go 1.22
 github.com/klauspost/compress
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
 github.com/klauspost/compress/internal/cpuinfo
+github.com/klauspost/compress/internal/le
 github.com/klauspost/compress/internal/snapref
 github.com/klauspost/compress/zstd
 github.com/klauspost/compress/zstd/internal/xxhash
@@ -434,8 +434,8 @@ github.com/mitchellh/go-ps
 # github.com/mitchellh/hashstructure/v2 v2.0.2
 ## explicit; go 1.14
 github.com/mitchellh/hashstructure/v2
-# github.com/moby/buildkit v0.20.0
-## explicit; go 1.22.0
+# github.com/moby/buildkit v0.23.1
+## explicit; go 1.23.0
 github.com/moby/buildkit/api/services/control
 github.com/moby/buildkit/api/types
 github.com/moby/buildkit/client
@@ -448,6 +448,7 @@ github.com/moby/buildkit/client/llb/sourceresolver
 github.com/moby/buildkit/client/ociindex
 github.com/moby/buildkit/cmd/buildctl/build
 github.com/moby/buildkit/errdefs
+github.com/moby/buildkit/executor/resources/types
 github.com/moby/buildkit/exporter/containerimage/exptypes
 github.com/moby/buildkit/exporter/exptypes
 github.com/moby/buildkit/frontend/attestations
@@ -478,6 +479,7 @@ github.com/moby/buildkit/session/sshforward/sshprovider
 github.com/moby/buildkit/session/upload
 github.com/moby/buildkit/session/upload/uploadprovider
 github.com/moby/buildkit/solver/errdefs
+github.com/moby/buildkit/solver/llbsolver/provenance/types
 github.com/moby/buildkit/solver/pb
 github.com/moby/buildkit/solver/result
 github.com/moby/buildkit/source/types
@@ -544,11 +546,11 @@ github.com/moul/gotty-client
 # github.com/opencontainers/go-digest v1.0.0
 ## explicit; go 1.13
 github.com/opencontainers/go-digest
-# github.com/opencontainers/image-spec v1.1.0
+# github.com/opencontainers/image-spec v1.1.1
 ## explicit; go 1.18
 github.com/opencontainers/image-spec/specs-go
 github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runtime-spec v1.2.0
+# github.com/opencontainers/runtime-spec v1.2.1
 ## explicit
 github.com/opencontainers/runtime-spec/specs-go
 # github.com/pkg/errors v0.9.1
@@ -603,10 +605,11 @@ github.com/scaleway/scaleway-sdk-go/logger
 github.com/scaleway/scaleway-sdk-go/namegenerator
 github.com/scaleway/scaleway-sdk-go/scw
 github.com/scaleway/scaleway-sdk-go/validation
-# github.com/secure-systems-lab/go-securesystemslib v0.4.0
-## explicit; go 1.17
+# github.com/secure-systems-lab/go-securesystemslib v0.6.0
+## explicit; go 1.20
 github.com/secure-systems-lab/go-securesystemslib/cjson
 github.com/secure-systems-lab/go-securesystemslib/dsse
+github.com/secure-systems-lab/go-securesystemslib/signerverifier
 # github.com/shibumi/go-pathspec v1.3.0
 ## explicit; go 1.17
 github.com/shibumi/go-pathspec
@@ -615,10 +618,11 @@ github.com/shibumi/go-pathspec
 github.com/sirupsen/logrus
 # github.com/smartystreets/goconvey v1.8.1
 ## explicit; go 1.18
-# github.com/spdx/tools-golang v0.5.3
+# github.com/spdx/tools-golang v0.5.5
 ## explicit; go 1.13
 github.com/spdx/tools-golang/convert
 github.com/spdx/tools-golang/json
+github.com/spdx/tools-golang/json/marshal
 github.com/spdx/tools-golang/spdx
 github.com/spdx/tools-golang/spdx/common
 github.com/spdx/tools-golang/spdx/v2/common
@@ -639,11 +643,11 @@ github.com/stretchr/testify/require
 # github.com/surma/gocpio v1.0.2-0.20160926205914-fcb68777e7dc
 ## explicit
 github.com/surma/gocpio
-# github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a
+# github.com/tonistiigi/fsutil v0.0.0-20250605211040-586307ad452f
 ## explicit; go 1.21
 github.com/tonistiigi/fsutil
 github.com/tonistiigi/fsutil/types
-# github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4
+# github.com/tonistiigi/go-csvvalue v0.0.0-20240814133006-030d3b2625d0
 ## explicit; go 1.16
 github.com/tonistiigi/go-csvvalue
 # github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
@@ -652,7 +656,7 @@ github.com/tonistiigi/units
 # github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab
 ## explicit; go 1.12
 github.com/tonistiigi/vt100
-# github.com/vbatts/tar-split v0.11.6
+# github.com/vbatts/tar-split v0.12.1
 ## explicit; go 1.17
 github.com/vbatts/tar-split/archive/tar
 # github.com/vmware/govmomi v0.20.3
@@ -704,21 +708,21 @@ go.opencensus.io/trace/tracestate
 ## explicit; go 1.22.0
 go.opentelemetry.io/auto/sdk
 go.opentelemetry.io/auto/sdk/internal/telemetry
-# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0
-## explicit; go 1.22
+# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0
+## explicit; go 1.22.0
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc
 go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal
 # go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.56.0
 ## explicit; go 1.22
 go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace
 go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/internal/semconvutil
-# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0
+# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0
 ## explicit; go 1.22.0
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
-# go.opentelemetry.io/otel v1.33.0
+# go.opentelemetry.io/otel v1.35.0
 ## explicit; go 1.22.0
 go.opentelemetry.io/otel
 go.opentelemetry.io/otel/attribute
@@ -733,16 +737,16 @@ go.opentelemetry.io/otel/semconv/v1.17.0
 go.opentelemetry.io/otel/semconv/v1.20.0
 go.opentelemetry.io/otel/semconv/v1.21.0
 go.opentelemetry.io/otel/semconv/v1.26.0
-# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0
-## explicit; go 1.22
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0
+## explicit; go 1.22.0
 go.opentelemetry.io/otel/exporters/otlp/otlptrace
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
-# go.opentelemetry.io/otel/metric v1.33.0
+# go.opentelemetry.io/otel/metric v1.35.0
 ## explicit; go 1.22.0
 go.opentelemetry.io/otel/metric
 go.opentelemetry.io/otel/metric/embedded
 go.opentelemetry.io/otel/metric/noop
-# go.opentelemetry.io/otel/sdk v1.33.0
+# go.opentelemetry.io/otel/sdk v1.35.0
 ## explicit; go 1.22.0
 go.opentelemetry.io/otel/sdk
 go.opentelemetry.io/otel/sdk/instrumentation
@@ -750,19 +754,20 @@ go.opentelemetry.io/otel/sdk/internal/env
 go.opentelemetry.io/otel/sdk/internal/x
 go.opentelemetry.io/otel/sdk/resource
 go.opentelemetry.io/otel/sdk/trace
-# go.opentelemetry.io/otel/trace v1.33.0
+# go.opentelemetry.io/otel/trace v1.35.0
 ## explicit; go 1.22.0
 go.opentelemetry.io/otel/trace
 go.opentelemetry.io/otel/trace/embedded
+go.opentelemetry.io/otel/trace/internal/telemetry
 go.opentelemetry.io/otel/trace/noop
-# go.opentelemetry.io/proto/otlp v1.3.1
-## explicit; go 1.17
+# go.opentelemetry.io/proto/otlp v1.5.0
+## explicit; go 1.22.0
 go.opentelemetry.io/proto/otlp/collector/trace/v1
 go.opentelemetry.io/proto/otlp/common/v1
 go.opentelemetry.io/proto/otlp/resource/v1
 go.opentelemetry.io/proto/otlp/trace/v1
-# golang.org/x/crypto v0.31.0
-## explicit; go 1.20
+# golang.org/x/crypto v0.37.0
+## explicit; go 1.23.0
 golang.org/x/crypto/blowfish
 golang.org/x/crypto/chacha20
 golang.org/x/crypto/chacha20poly1305
@@ -779,20 +784,21 @@ golang.org/x/crypto/pkcs12/internal/rc2
 golang.org/x/crypto/ssh
 golang.org/x/crypto/ssh/agent
 golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
-# golang.org/x/mod v0.22.0
-## explicit; go 1.22.0
+# golang.org/x/mod v0.24.0
+## explicit; go 1.23.0
 golang.org/x/mod/semver
-# golang.org/x/net v0.33.0
-## explicit; go 1.18
+# golang.org/x/net v0.39.0
+## explicit; go 1.23.0
 golang.org/x/net/context
 golang.org/x/net/http/httpguts
 golang.org/x/net/http2
 golang.org/x/net/http2/hpack
 golang.org/x/net/idna
+golang.org/x/net/internal/httpcommon
 golang.org/x/net/internal/timeseries
 golang.org/x/net/trace
-# golang.org/x/oauth2 v0.25.0
-## explicit; go 1.18
+# golang.org/x/oauth2 v0.27.0
+## explicit; go 1.23.0
 golang.org/x/oauth2
 golang.org/x/oauth2/authhandler
 golang.org/x/oauth2/google
@@ -803,22 +809,22 @@ golang.org/x/oauth2/google/internal/stsexchange
 golang.org/x/oauth2/internal
 golang.org/x/oauth2/jws
 golang.org/x/oauth2/jwt
-# golang.org/x/sync v0.10.0
-## explicit; go 1.18
+# golang.org/x/sync v0.14.0
+## explicit; go 1.23.0
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
-# golang.org/x/sys v0.29.0
-## explicit; go 1.18
+# golang.org/x/sys v0.33.0
+## explicit; go 1.23.0
 golang.org/x/sys/cpu
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
 golang.org/x/sys/windows/registry
-# golang.org/x/term v0.27.0
-## explicit; go 1.18
+# golang.org/x/term v0.31.0
+## explicit; go 1.23.0
 golang.org/x/term
-# golang.org/x/text v0.21.0
-## explicit; go 1.18
+# golang.org/x/text v0.24.0
+## explicit; go 1.23.0
 golang.org/x/text/cases
 golang.org/x/text/internal
 golang.org/x/text/internal/language
@@ -829,8 +835,8 @@ golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# golang.org/x/time v0.6.0
-## explicit; go 1.18
+# golang.org/x/time v0.11.0
+## explicit; go 1.23.0
 golang.org/x/time/rate
 # google.golang.org/api v0.149.0
 ## explicit; go 1.19
@@ -847,21 +853,22 @@ google.golang.org/api/option/internaloption
 google.golang.org/api/storage/v1
 google.golang.org/api/transport/http
 google.golang.org/api/transport/http/internal/propagation
-# google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38
-## explicit; go 1.21
+# google.golang.org/genproto/googleapis/api v0.0.0-20250218202821-56aae31c358a
+## explicit; go 1.22
 google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38
-## explicit; go 1.21
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20250218202821-56aae31c358a
+## explicit; go 1.22
 google.golang.org/genproto/googleapis/rpc/code
 google.golang.org/genproto/googleapis/rpc/errdetails
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.69.4
-## explicit; go 1.22
+# google.golang.org/grpc v1.72.2
+## explicit; go 1.23
 google.golang.org/grpc
 google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
 google.golang.org/grpc/balancer
 google.golang.org/grpc/balancer/base
+google.golang.org/grpc/balancer/endpointsharding
 google.golang.org/grpc/balancer/grpclb/state
 google.golang.org/grpc/balancer/pickfirst
 google.golang.org/grpc/balancer/pickfirst/internal
@@ -895,7 +902,9 @@ google.golang.org/grpc/internal/grpcutil
 google.golang.org/grpc/internal/idle
 google.golang.org/grpc/internal/metadata
 google.golang.org/grpc/internal/pretty
+google.golang.org/grpc/internal/proxyattributes
 google.golang.org/grpc/internal/resolver
+google.golang.org/grpc/internal/resolver/delegatingresolver
 google.golang.org/grpc/internal/resolver/dns
 google.golang.org/grpc/internal/resolver/dns/internal
 google.golang.org/grpc/internal/resolver/passthrough
@@ -916,8 +925,8 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.36.3
-## explicit; go 1.21
+# google.golang.org/protobuf v1.36.6
+## explicit; go 1.22
 google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire